#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function
import six
from six.moves import reduce
from ..layer_helper import LayerHelper
from ..param_attr import ParamAttr
from ..initializer import Initializer
from ..framework import convert_np_dtype_to_dtype_, in_dygraph_mode, _varbase_creator, device_guard, OpProtoHolder
from ..framework import Variable
from ..initializer import Constant
from ..core import VarDesc
from .. import core
from .layer_function_generator import templatedoc
from . import utils
from ..data_feeder import check_variable_and_dtype, check_type, check_dtype, convert_dtype
import numpy
import warnings

__all__ = [
    'create_tensor',
    'create_parameter',
    'create_global_var',
    'cast',
    'tensor_array_to_tensor',
    'concat',
    'sums',
    'assign',
    'fill_constant_batch_size_like',
    'fill_constant',
    'argmin',
    'argmax',
    'argsort',
    'ones',
    'zeros',
    'reverse',
    'has_inf',
    'has_nan',
    'isfinite',
    'range',
    'linspace',
    'full_like',
    'zeros_like',
    'ones_like',
    'diag',
    'eye',
    'kron',
    'arange',
    'full',
    'tril',
    'triu',
]


def create_tensor(dtype, name=None, persistable=False):
    """
    Create a variable, which will hold a Tensor with data type dtype.

    Args:
        dtype(string|numpy.dtype): the data type of Tensor to be created, the
            data type is bool, float16, float32, float64, int8, int16, int32 and int64.
        name(string, optional): The default value is None. Normally there is no need for
            user to set this property. For more information, please refer to :ref:`api_guide_Name` .
        persistable(bool): Set the persistable flag of the created tensor,
            default value is False.

    Returns:
        Variable: The tensor to be created according to dtype.

    Examples:
        .. code-block:: python

          import paddle.fluid as fluid
          tensor = fluid.layers.create_tensor(dtype='float32')
    """
    check_dtype(dtype, 'dtype', [
        'bool', 'float16', 'float32', 'float64', 'int8', 'int16', 'int32',
        'int64'
    ], 'create_tensor')
    helper = LayerHelper("create_tensor", **locals())
    return helper.create_variable(
        name=helper.name, dtype=dtype, persistable=persistable)


def create_parameter(shape,
                     dtype,
                     name=None,
                     attr=None,
                     is_bias=False,
                     default_initializer=None):
    """
    This function creates a parameter. The parameter is a learnable variable,
    which can have gradient and can be optimized.

    NOTE: This is a very low-level API. It is useful when you create an
    operator by yourself, instead of using layers.

    Parameters:
        shape (list of int): Shape of the parameter
        dtype (str): Data type of the parameter
        name (str, optional): For detailed information, please refer to
           :ref:`api_guide_Name` . Usually there is no need to set it and it is None by default.
        attr (ParamAttr, optional): Attributes of the parameter
        is_bias (bool, optional): This can affect which default initializer is chosen
                       when default_initializer is None. If is_bias,
                       initializer.Constant(0.0) will be used. Otherwise,
                       Xavier() will be used.
        default_initializer (Initializer, optional): Initializer for the parameter

    Returns:
        The created parameter.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import paddle.fluid.layers as layers
            W = layers.create_parameter(shape=[784, 200], dtype='float32')
    """
    check_type(shape, 'shape', (list, tuple, numpy.ndarray), 'create_parameter')
    for item in shape:
        if six.PY2:
            check_type(item, 'item of shape',
                       (int, long, numpy.uint8, numpy.int8, numpy.int16,
                        numpy.int32, numpy.int64), 'create_parameter')
        else:
            check_type(item, 'item of shape',
                       (int, numpy.uint8, numpy.int8, numpy.int16, numpy.int32,
                        numpy.int64), 'create_parameter')

    check_dtype(dtype, 'dtype', [
        'bool', 'float16', 'float32', 'float64', 'int8', 'int16', 'int32',
        'int64', 'uint8'
    ], 'create_parameter')
    check_type(attr, 'attr', (type(None), ParamAttr), 'create_parameter')
    check_type(default_initializer, 'default_initializer',
               (type(None), Initializer), 'create_parameter')

    helper = LayerHelper("create_parameter", **locals())
    if attr is None:
        attr = ParamAttr(name=name)
    return helper.create_parameter(attr, shape,
                                   convert_dtype(dtype), is_bias,
                                   default_initializer)


def create_global_var(shape,
                      value,
                      dtype,
                      persistable=False,
                      force_cpu=False,
                      name=None):
    """
    This function creates a new tensor variable with value in the global block (block 0).

    Parameters:
        shape (list of int): Shape of the variable
        value (float): The value of the variable. The new created
                      variable will be filled with it.
        dtype (str): Data type of the variable
        persistable (bool, optional): If this variable is persistable.
                           Default: False
        force_cpu (bool, optional): Force this variable to be on CPU.
                         Default: False
        name (str, optional): For detailed information, please refer to
           :ref:`api_guide_Name` . Usually there is no need to set it and it is None by default.

    Returns:
        Variable: The created Variable

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import paddle.fluid.layers as layers
            var = layers.create_global_var(shape=[2,3], value=1.0, dtype='float32',
                                           persistable=True, force_cpu=True, name='new_var')
    """
    check_type(shape, 'shape', (list, tuple, numpy.ndarray),
               'create_global_var')
    for item in shape:
        if six.PY2:
            check_type(item, 'item of shape',
                       (int, long, numpy.uint8, numpy.int8, numpy.int16,
                        numpy.int32, numpy.int64), 'create_global_var')
        else:
            check_type(item, 'item of shape',
                       (int, numpy.uint8, numpy.int8, numpy.int16, numpy.int32,
                        numpy.int64), 'create_global_var')

    check_dtype(dtype, 'dtype', [
        'bool', 'float16', 'float32', 'float64', 'int8', 'int16', 'int32',
        'int64', 'uint8'
    ], 'create_global_var')

    helper = LayerHelper("global_var", **locals())
    var = helper.create_global_variable(
        dtype=dtype,
        shape=shape,
        persistable=persistable,
        name=name,
        stop_gradient=True)
    helper.set_variable_initializer(
        var, initializer=Constant(
            value=float(value), force_cpu=force_cpu))

    return var


def cast(x, dtype):
    """
    This OP takes in the Variable :attr:`x` with :attr:`x.dtype` and casts it
    to the output with :attr:`dtype`. It's meaningless if the output dtype
    equals the input dtype, but it's fine if you do so.

    Args:
        x(Variable): An input N-D Tensor with data type bool, float16,
            float32, float64, int32, int64, uint8.
        dtype(np.dtype|core.VarDesc.VarType|str): Data type of the output:
            bool, float16, float32, float64, int8, int32, int64, uint8.

    Returns:
        Variable: A Tensor with the same shape as input's.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import numpy as np

            place = fluid.core.CPUPlace()

            x_lod = fluid.data(name="x", shape=[2,2], lod_level=0)
            cast_res1 = fluid.layers.cast(x=x_lod, dtype="uint8")
            cast_res2 = fluid.layers.cast(x=x_lod, dtype=np.int32)

            exe = fluid.Executor(place)
            exe.run(fluid.default_startup_program())

            x_i_lod = fluid.core.LoDTensor()
            x_i_lod.set(np.array([[1.3,-2.4],[0,4]]).astype("float32"), place)
            x_i_lod.set_recursive_sequence_lengths([[0,2]])
            res1 = exe.run(fluid.default_main_program(), feed={'x':x_i_lod}, fetch_list=[cast_res1], return_numpy=False)
            res2 = exe.run(fluid.default_main_program(), feed={'x':x_i_lod}, fetch_list=[cast_res2], return_numpy=False)
            print(np.array(res1[0]), np.array(res1[0]).dtype)
            # [[  1 254]
            #  [  0   4]] uint8
            print(np.array(res2[0]), np.array(res2[0]).dtype)
            # [[ 1 -2]
            #  [ 0  4]] int32
    """
    check_variable_and_dtype(
        x, 'x',
        ['bool', 'float16', 'float32', 'float64', 'int32', 'int64', 'uint8'],
        'cast')
    check_dtype(dtype, 'dtype', [
        'bool', 'float16', 'float32', 'float64', 'int8', 'int32', 'int64',
        'uint8'
    ], 'cast')

    helper = LayerHelper('cast', **locals())
    out = helper.create_variable_for_type_inference(dtype=dtype)
    helper.append_op(
        type='cast',
        inputs={'X': [x]},
        outputs={'Out': [out]},
        attrs={'in_dtype': x.dtype,
               'out_dtype': out.dtype})
    return out


def concat(input, axis=0, name=None):
    """
    **Concat**

    This OP concatenates the input along the axis.

    Args:
        input(list): List of input Tensors with data type float32, float64, int32,
            int64.
        axis(int32|Variable, optional): A scalar with type ``int32`` or a ``Tensor`` with
            shape [1] and type ``int32``. The axis along which to concatenate the Tensors.
            The effective range is [-R, R), where R is Rank(x). When axis < 0, it works
            the same way as axis+R. Default is 0.
        name (str, optional): The default value is None. Normally there is no
            need for user to set this property. For more information, please
            refer to :ref:`api_guide_Name`.

    Returns:
        Variable: A Tensor with the same data type as input's.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import numpy as np

            in1 = np.array([[1,2,3],
                            [4,5,6]])
            in2 = np.array([[11,12,13],
                            [14,15,16]])
            in3 = np.array([[21,22],
                            [23,24]])
            with fluid.dygraph.guard():
                x1 = fluid.dygraph.to_variable(in1)
                x2 = fluid.dygraph.to_variable(in2)
                x3 = fluid.dygraph.to_variable(in3)
                out1 = fluid.layers.concat(input=[x1,x2,x3], axis=-1)
                out2 = fluid.layers.concat(input=[x1,x2], axis=0)
                print(out1.numpy())
                # [[ 1  2  3 11 12 13 21 22]
                #  [ 4  5  6 14 15 16 23 24]]
                print(out2.numpy())
                # [[ 1  2  3]
                #  [ 4  5  6]
                #  [11 12 13]
                #  [14 15 16]]
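                # As a sketch of the Tensor-axis form, axis may also be given
                # as a 1-D Tensor with shape [1]; this is equivalent to axis=0.
                axis_t = fluid.layers.fill_constant(shape=[1], dtype='int32', value=0)
                out3 = fluid.layers.concat(input=[x1, x2], axis=axis_t)
                print(out3.numpy())  # same values as out2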
    """

    if in_dygraph_mode():
        if isinstance(axis, Variable):
            axis = axis.numpy()
            assert axis.shape == (
                1, ), "axis of type Variable should have shape [1]"
            axis = axis[0]
        return core.ops.concat(input, 'axis', axis)

    if not isinstance(input, list):
        warnings.warn(
            "The type of input in concat should be list, but received %s." %
            (type(input)))
        input = [input]
    for id, x in enumerate(input):
        check_variable_and_dtype(
            x, 'input[' + str(id) + ']',
            ['float16', 'float32', 'float64', 'int32', 'int64'], 'concat')
    check_type(axis, 'axis', (int, Variable), 'concat')

    helper = LayerHelper('concat', **locals())
    out = helper.create_variable_for_type_inference(dtype=helper.input_dtype())

    if input[0].desc.type() == core.VarDesc.VarType.LOD_TENSOR_ARRAY:
        assert len(input) == 1, "If the elements of 'input' in concat are Variable(LoDTensorArray), " \
                            "number of the elements must be 1, but received %s." % len(input)
        out_index = helper.create_variable_for_type_inference(dtype="int32")
        helper.append_op(
            type='tensor_array_to_tensor',
            inputs={'X': input[0]},
            outputs={'Out': [out],
                     'OutIndex': [out_index]},
            attrs={'axis': axis,
                   'use_stack': False})
    else:
        inputs = {'X': input}
        attrs = {}
        if isinstance(axis, Variable):
            axis.stop_gradient = True
            inputs['AxisTensor'] = axis
        else:
            attrs['axis'] = axis

        helper.append_op(
            type='concat', inputs=inputs, outputs={'Out': [out]}, attrs=attrs)
    return out


def tensor_array_to_tensor(input, axis=1, name=None, use_stack=False):
    """
    This function concatenates or stacks all tensors in the input LoDTensorArray
    along the given axis and returns the result as the output.

    For Example:

    .. code-block:: text

        Case 1:

            Given:

                input.data = {[[0.6, 0.1, 0.3],
                               [0.5, 0.3, 0.2]],
                              [[1.3],
                               [1.8]],
                              [[2.3, 2.1],
                               [2.5, 2.4]]}

                axis = 1, use_stack = False

            Then:

                output.data = [[0.6, 0.1, 0.3, 1.3, 2.3, 2.1],
                               [0.5, 0.3, 0.2, 1.8, 2.5, 2.4]]

                output_index.data = [3, 1, 2]

        Case 2:

            Given:

                input.data = {[[0.6, 0.1],
                               [0.5, 0.3]],
                              [[0.3, 1.3],
                               [0.2, 1.8]],
                              [[2.3, 2.1],
                               [2.5, 2.4]]}

                axis = 1, use_stack = True

            Then:

                output.data = [[[0.6, 0.1]
                                [0.3, 1.3]
                                [2.3, 2.1],
                               [[0.5, 0.3]
                                [0.2, 1.8]
                                [2.5, 2.4]]]

                output_index.data = [2, 2, 2]

    Args:
        input(Variable): A LoDTensorArray variable.
        axis(int): The axis along which the tensors in :attr:`input` will be
            concatenated or stacked.
        name(str|None): A name for this layer(optional). If set None, the layer
                       will be named automatically.
        use_stack(bool): Act as concat_op or stack_op. For stack mode, all
            tensors in the tensor array must have the same shape.

    Returns:
        Variable: The concatenated or stacked tensor variable.
        Variable: A 1-D tensor variable with int32 data type. The data in this \
            tensor contains all input including tensors' sizes along the axis.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import numpy as np
            x0 = fluid.layers.assign(np.random.rand(2, 2).astype("float32"))
            x1 = fluid.layers.assign(np.random.rand(2, 2).astype("float32"))
            i = fluid.layers.fill_constant(shape=[1], dtype="int64", value=0)
            array = fluid.layers.create_array(dtype='float32')
            fluid.layers.array_write(x0, i, array)
            fluid.layers.array_write(x1, i + 1, array)
            output, output_index = fluid.layers.tensor_array_to_tensor(input=array)
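
            # A stacked variant (sketch): with use_stack=True the tensors are
            # stacked along a new axis instead of concatenated, so every
            # element of the array must have the same shape.
            stacked, stacked_index = fluid.layers.tensor_array_to_tensor(
                input=array, axis=0, use_stack=True)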
    """
    if in_dygraph_mode():
        assert isinstance(
            input, list), "The 'input' in tensor_array_to_tensor must be list"
        from .nn import stack, concat
        from ..dygraph import to_variable
        op = stack if use_stack else concat
        res = op(input, axis=axis)
        sizes = to_variable(
            numpy.array(list(map(lambda x: int(x.shape[axis]), input))))
        return res, sizes

    check_type(input, 'input', (list, Variable), 'tensor_array_to_tensor')
    if isinstance(input, list):
        for i, input_x in enumerate(input):
            check_type(input_x, 'input[' + str(i) + ']', Variable,
                       'tensor_array_to_tensor')
    helper = LayerHelper('tensor_array_to_tensor', **locals())
    out = helper.create_variable_for_type_inference(dtype=helper.input_dtype())
    out_index = helper.create_variable_for_type_inference(dtype="int32")
    helper.append_op(
        type='tensor_array_to_tensor',
        inputs={'X': input},
        outputs={'Out': [out],
                 'OutIndex': [out_index]},
        attrs={'axis': axis,
               'use_stack': use_stack})
    return out, out_index


def sums(input, out=None):
    """
    This function computes the elementwise sum of multiple input Tensors.

    - Case 1, sum of 3 Tensors

    .. code-block:: text

        # Input Tensors
        x0.shape = [2, 3]
        x0.data = [[1., 2., 3.],
                   [4., 5., 6.]]
        x1.shape = [2, 3]
        x1.data = [[10., 20., 30.],
                   [40., 50., 60.]]
        x2.shape = [2, 3]
        x2.data = [[100., 200., 300.],
                   [400., 500., 600.]]

        # Output Tensor
        out.shape = [2, 3]
        out.data = [[111., 222., 333.],
                    [444., 555., 666.]]

    Args:
        input (list): A list of Variables which hold input Tensors with the same
            data type and shape. Optional data types are: float32, float64, int32, int64.
        out (Variable, optional): Output Tensor. It can be any existing Variable.
            The default value is None, then a new Variable will be created and returned.

    Returns:
        Variable: The sum of inputs. The shape and data type is the same with input. \
            If :code:`out` is not None, the returned value is :code:`out` .

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid

            x0 = fluid.layers.fill_constant(shape=[16, 32], dtype='int64', value=1)
            x1 = fluid.layers.fill_constant(shape=[16, 32], dtype='int64', value=2)
            x2 = fluid.layers.fill_constant(shape=[16, 32], dtype='int64', value=3)
            x3 = fluid.layers.fill_constant(shape=[16, 32], dtype='int64', value=0)

            # Sum of multiple Tensors, the result is stored to a new Variable sum0 (sum0=x0+x1+x2, the value is [[6, ..., 6], ..., [6, ..., 6]])
            sum0 = fluid.layers.sums(input=[x0, x1, x2])

            # Sum of multiple Tensors, sum1 and x3 represents the same Variable (x3=x0+x1+x2, the value is [[6, ..., 6], ..., [6, ..., 6]])
            sum1 = fluid.layers.sums(input=[x0, x1, x2], out=x3)
    """
    check_type(input, 'input', (Variable, tuple, list), 'sums')
    if isinstance(input, list) or isinstance(input, tuple):
        for input_section in input:
            check_variable_and_dtype(input_section, "input", \
                    ['float32', 'float64', 'int32', 'int64'], 'sums')
    else:
        check_variable_and_dtype(input, "input", \
                ['float32', 'float64', 'int32', 'int64'], 'sums')

    helper = LayerHelper('sum', **locals())
    if out is None:
        out = helper.create_variable_for_type_inference(
            dtype=helper.input_dtype())
    else:
        check_variable_and_dtype(
            out, "out", ['float32', 'float64', 'int32', 'int64'], 'sums')

    helper.append_op(
        type='sum',
        inputs={'X': input},
        outputs={'Out': out},
        attrs={'use_mkldnn': False})
    return out


def assign(input, output=None):
    """
    The OP copies the :attr:`input` to the :attr:`output`.

    Parameters:
        input (Variable|numpy.ndarray): A tensor or numpy ndarray, its data type supports
            float32, float64, int32 and int64.
        output (Variable, optional): A tensor. If :attr:`output` is None, a new tensor will
            be created as :attr:`output`. Default: None.

    Returns:
        Variable: A tensor with the same shape, data type and value as :attr:`input`.

    Examples:
        .. code-block:: python

          import paddle.fluid as fluid
          import numpy as np
          data = fluid.layers.fill_constant(shape=[3, 2], value=2.5, dtype='float64') # [[2.5, 2.5], [2.5, 2.5], [2.5, 2.5]]
          result1 = fluid.layers.create_tensor(dtype='float64')
          fluid.layers.assign(data, result1) # result1 = [[2.5, 2.5], [2.5, 2.5], [2.5, 2.5]]
          result2 = fluid.layers.assign(data)  # result2 = [[2.5, 2.5], [2.5, 2.5], [2.5, 2.5]]
          result3 = fluid.layers.assign(np.array([[2.5, 2.5], [2.5, 2.5], [2.5, 2.5]], dtype='float32')) # result3 = [[2.5, 2.5], [2.5, 2.5], [2.5, 2.5]]
    """
    helper = LayerHelper('assign', **locals())
    check_type(input, 'input', (Variable, numpy.ndarray), 'assign')
    if isinstance(input, Variable):
        check_dtype(input.dtype, 'input',
                    ['float32', 'float64', 'int32', 'int64', 'bool'], 'assign',
                    '(When the type of input in assign is Variable.)')
        if output is None:
            output = helper.create_variable_for_type_inference(
                dtype=input.dtype)
        helper.append_op(
            type='assign', inputs={'X': [input]}, outputs={'Out': [output]})
    elif isinstance(input, numpy.ndarray):
        dtype = convert_np_dtype_to_dtype_(input.dtype)
        if dtype == VarDesc.VarType.BOOL:
            value_name = "bool_values"
            values = [bool(v) for v in input.flat]
        elif dtype == VarDesc.VarType.FP32:
            value_name = "fp32_values"
            values = [float(v) for v in input.flat]
        elif dtype == VarDesc.VarType.INT32:
            value_name = "int32_values"
            values = [int(v) for v in input.flat]
        elif dtype == VarDesc.VarType.INT64:
            value_name = "int64_values"
            values = [int(v) for v in input.flat]
        else:
            raise TypeError(
                "When the type of 'input' in assign is numpy.ndarray, "
                "the data type of 'input' must be bool, float32, int32 or int64, but "
                "received %s." % convert_dtype(dtype))
        if input.size > 1024 * 1024:
            raise ValueError("The size of input is too big. Please consider "
                             "saving it to file and 'load_op' to load it")
        if output is None:
            output = helper.create_variable_for_type_inference(
                dtype=input.dtype)
        helper.append_op(
            type='assign_value',
            outputs={'Out': [output]},
            attrs={
                'dtype': dtype,
                'shape': list(input.shape),
                value_name: values
            })

    return output


def fill_constant(shape, dtype, value, force_cpu=False, out=None):
    """
    This OP creates a Tensor with specified `shape` and `dtype`, and
    initializes it with a constant specified by `value`.

    The attribute `stop_gradient` of the created Tensor is set to True.

    Args:
        shape(list|tuple|Variable): Shape of the Tensor to be created.
                The data type is ``int32`` or ``int64`` . If ``shape`` is a list or tuple,
                the elements of it should be integers or Tensors with shape [1].
                If ``shape`` is a Variable, it should be a 1-D Tensor.
        dtype(np.dtype|core.VarDesc.VarType|str): Data type of the output tensor which can
            be float16, float32, float64, int32, int64.
        value(float16|float32|float64|int32|int64|Variable): The constant value used to initialize
            the Tensor to be created. If value is a Variable, it should be a 1-D Tensor.
        force_cpu(bool): data should be on CPU if it's true, default value is False.
        out(Variable, optional): Optional output which can be any created
            Variable that meets the requirements to store the result of operation.
            If out is None, a new Variable will be created to store the result.

    Returns:
        Variable: Tensor which is created according to shape and dtype.

    Raises:
        TypeError: The dtype must be one of bool, float16, float32, float64, int32 and int64,
            and the data type of the out Tensor must be the same as the dtype.

    Examples:
        .. code-block:: python

          import paddle.fluid as fluid
          # attr shape is a list which doesn't contain Variable Tensor.
          data1 = fluid.layers.fill_constant(shape=[2,1], value=0, dtype='int64') # data1=[[0],[0]]
          data2 = fluid.layers.fill_constant(shape=[2,1], value=5, dtype='int64', out=data1)
          # data1=[[5], [5]] data2=[[5], [5]]

          # attr shape is a list which contains Variable Tensor.
          positive_2 = fluid.layers.fill_constant([1], "int32", 2)
          data3 = fluid.layers.fill_constant(shape=[1, positive_2], dtype='float32', value=1.5) # data3=[1.5, 1.5]

          # attr shape is a Variable Tensor.
          shape = fluid.layers.fill_constant([1,2], "int32", 2) # shape=[2,2]
          data4 = fluid.layers.fill_constant(shape=shape, dtype='bool', value=True) # data4=[[True,True],[True,True]]

          # attr value is a Variable Tensor.
          val = fluid.layers.fill_constant([1], "float32", 2.0) # val=[2.0]
          data5 = fluid.layers.fill_constant(shape=[2,1], value=val, dtype='float32') #data5=[[2.0],[2.0]]
    """
    inputs = {}
    attrs = {'force_cpu': force_cpu}
    if isinstance(value, Variable):
        inputs['ValueTensor'] = value
    else:
        attrs['value'] = float(value)
        if convert_dtype(dtype) in ['int64', 'int32']:
            attrs['str_value'] = str(int(value))
        else:
            attrs['str_value'] = str(float(value))

    if in_dygraph_mode():
        if isinstance(shape, (list, tuple)):
            shape = list(
                map(lambda x: x.numpy()[0] if isinstance(x, Variable) else x,
                    shape))
        else:
            shape = list(shape.numpy().astype(int))
        if out is None:
            out = _varbase_creator(dtype=dtype)

        if isinstance(value, Variable):
            if convert_dtype(dtype) in ['int64', 'int32']:
                attrs['str_value'] = str(int(value.numpy()))
            else:
                attrs['str_value'] = str(float(value.numpy()))

        core.ops.fill_constant(out, 'value',
                               float(value), 'force_cpu', force_cpu, 'dtype',
                               out.dtype, 'str_value', attrs['str_value'],
                               'shape', shape)
        out.stop_gradient = True
        return out

    check_dtype(dtype, 'dtype',
                ['bool', 'float16', 'float32', 'float64', 'int32', 'int64'],
                'fill_constant')
    check_type(shape, 'shape', (Variable, list, tuple), 'fill_constant')
    if isinstance(shape, Variable):
        check_variable_and_dtype(shape, 'shape', ['int32', 'int64'],
                                 'fill_constant')
    if out is not None:
        check_variable_and_dtype(out, 'out', [convert_dtype(dtype)],
                                 'fill_constant')

    helper = LayerHelper("fill_constant", **locals())
    inputs = utils._get_shape_tensor_inputs(
        inputs=inputs,
        helper=helper,
        attrs=attrs,
        shape=shape,
        op_type='fill_constant')
    if out is None:
        out = helper.create_variable_for_type_inference(dtype=dtype)
    attrs['dtype'] = out.dtype
    helper.append_op(
        type='fill_constant',
        inputs=inputs,
        outputs={'Out': [out]},
        attrs=attrs,
        stop_gradient=True)
    out.stop_gradient = True
    return out


@templatedoc()
def fill_constant_batch_size_like(input,
                                  shape,
                                  dtype,
                                  value,
                                  input_dim_idx=0,
                                  output_dim_idx=0,
                                  force_cpu=False):
    """
    This OP creates a Tensor according to the shape and dtype, and initializes the
    Tensor with the constants provided in ``value``. When the input is a LoDTensor
    and input_dim_idx is 0, the output_dim_idx dimension of the output is set to
    the batch size of the input. The stop_gradient attribute of the created Tensor
    is False by default.

    Args:
        input(Variable): Tensor which data type is float32, float64, int32 and int64.
        shape(list): The shape of Tensor to be created, Tensor's shape may be changed
            according the input.
        dtype(np.dtype|core.VarDesc.VarType|str): The data type of created Tensor which
            can be float32, float64, int32, int64.
        value(float|int): The constant value used to initialize the Tensor to be created. 
        input_dim_idx(int): When the value is 0 and the input is LoDTensor, the output_dim_idx
            dimension of the created Tensor is set to the batch_size value of input.
            The default value is 0.
        output_dim_idx(int): Used to specify which dimension of the created Tensor is
            set to the batch size of the input Tensor. The default value is 0.
        force_cpu(bool): data should be on CPU if it's true, default value is False.

    Returns:
        Variable: Tensor which will be created according to dtype.

    Examples:

        .. code-block:: python

             import paddle.fluid as fluid
             like = fluid.layers.fill_constant(shape=[1,2], value=10, dtype='int64') #like=[[10, 10]]
             data = fluid.layers.fill_constant_batch_size_like(
                    input=like, shape=[1], value=0, dtype='int64') #like=[[10, 10]] data=[0]
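
             # A sketch of the dim-idx attributes: dimension output_dim_idx of
             # the output is replaced by dimension input_dim_idx of the input,
             # so data2 has shape [1, 3] here (dim 0 of like is 1).
             data2 = fluid.layers.fill_constant_batch_size_like(
                    input=like, shape=[1, 3], value=0, dtype='int64',
                    input_dim_idx=0, output_dim_idx=0)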
    """
    helper = LayerHelper("fill_constant_batch_size_like", **locals())
    out = helper.create_variable_for_type_inference(dtype=dtype)
    attrs = {
        'shape': shape,
        'dtype': out.dtype,
        'value': float(value),
        'input_dim_idx': input_dim_idx,
        'output_dim_idx': output_dim_idx,
        'force_cpu': force_cpu
    }
    if convert_dtype(dtype) in ['int64', 'int32']:
        attrs['str_value'] = str(int(value))
    else:
        attrs['str_value'] = str(float(value))
    helper.append_op(
        type='fill_constant_batch_size_like',
        inputs={'Input': input},
        outputs={'Out': [out]},
        attrs=attrs)
    out.stop_gradient = True
    return out


S
sneaxiy 已提交
814 815 816 817
def argmin(x, axis=0):
    """
    **argmin**

818 819
    This OP computes the indices of the min elements of the input tensor's
    element along the provided axis.
S
sneaxiy 已提交
820 821

    Args:
822 823 824 825 826
        x(Variable): An input N-D Tensor with type float32, float64, int16,
            int32, int64, uint8.
        axis(int, optional): Axis to compute indices along. The effective range
            is [-R, R), where R is Rank(x). when axis<0, it works the same way
            as axis+R. Default is 0.
F
fengjiayi 已提交
827

S
sneaxiy 已提交
828
    Returns:
829
        Variable: A Tensor with data type int64.
F
fengjiayi 已提交
830

S
sneaxiy 已提交
831 832
    Examples:
        .. code-block:: python
F
fengjiayi 已提交
833

834
            import paddle.fluid as fluid
835 836 837 838 839 840 841 842 843 844 845 846 847 848 849 850 851 852 853 854 855 856 857 858 859 860 861
            import numpy as np

            in1 = np.array([[[5,8,9,5],
                            [0,0,1,7],
                            [6,9,2,4]],
                            [[5,2,4,2],
                            [4,7,7,9],
                            [1,7,0,6]]])
            with fluid.dygraph.guard():
                x = fluid.dygraph.to_variable(in1)
                out1 = fluid.layers.argmin(x=x, axis=-1)
                out2 = fluid.layers.argmin(x=x, axis=0)
                out3 = fluid.layers.argmin(x=x, axis=1)
                out4 = fluid.layers.argmin(x=x, axis=2)
                print(out1.numpy())
                # [[0 0 2]
                #  [1 0 2]]
                print(out2.numpy())
                # [[0 1 1 1]
                #  [0 0 0 0]
                #  [1 1 1 0]]
                print(out3.numpy())
                # [[1 1 1 2]
                #  [2 0 2 0]]
                print(out4.numpy())
                # [[0 0 2]
                #  [1 0 2]]
    """
    check_variable_and_dtype(
        x, 'x', ['float32', 'float64', 'uint8', 'int16', 'int32', 'int64'],
        'argmin')
    helper = LayerHelper("arg_min", **locals())
    out = helper.create_variable_for_type_inference(VarDesc.VarType.INT64)
    helper.append_op(
        type='arg_min',
        inputs={'X': x},
        outputs={'Out': [out]},
        attrs={'axis': axis})
    out.stop_gradient = True
    return out


def argmax(x, axis=0):
    """
    **argmax**

    This OP computes the indices of the max elements of the input tensor's
    element along the provided axis.

    Args:
        x(Variable): An input N-D Tensor with type float32, float64, int16,
            int32, int64, uint8.
        axis(int, optional): Axis to compute indices along. The effective range
            is [-R, R), where R is Rank(x). When axis < 0, it works the same way
            as axis+R. Default is 0.

    Returns:
        Variable: A Tensor with data type int64.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import numpy as np

            in1 = np.array([[[5,8,9,5],
                            [0,0,1,7],
                            [6,9,2,4]],
                            [[5,2,4,2],
                            [4,7,7,9],
                            [1,7,0,6]]])
            with fluid.dygraph.guard():
                x = fluid.dygraph.to_variable(in1)
                out1 = fluid.layers.argmax(x=x, axis=-1)
                out2 = fluid.layers.argmax(x=x, axis=0)
                out3 = fluid.layers.argmax(x=x, axis=1)
                out4 = fluid.layers.argmax(x=x, axis=2)
                print(out1.numpy())
                # [[2 3 1]
                #  [0 3 1]]
                print(out2.numpy())
                # [[0 0 0 0]
                #  [1 1 1 1]
                #  [0 0 0 1]]
                print(out3.numpy())
                # [[2 2 0 1]
                #  [0 1 1 1]]
                print(out4.numpy())
                # [[2 3 1]
                #  [0 3 1]]
    """
    check_variable_and_dtype(
        x, 'x', ['float32', 'float64', 'uint8', 'int16', 'int32', 'int64'],
        'argmax')
    helper = LayerHelper("arg_max", **locals())
    out = helper.create_variable_for_type_inference(VarDesc.VarType.INT64)
    helper.append_op(
        type='arg_max',
        inputs={'X': x},
        outputs={'Out': [out]},
        attrs={'axis': axis})
    out.stop_gradient = True
    return out


def argsort(input, axis=-1, descending=False, name=None):
    """
    This OP sorts the input along the given axis, and returns the sorted output
    data Variable and its corresponding index Variable with the same shape as
    :attr:`input`.

    Args:
        input(Variable): An input N-D Tensor with type float32, float64, int16,
            int32, int64, uint8.
        axis(int, optional): Axis along which to sort the input Tensor. The effective
            range is [-R, R), where R is Rank(x). When axis < 0, it works the same way
            as axis+R. Default is -1.
        descending(bool, optional) : Descending is a flag, if set to true,
            algorithm will sort by descending order, else sort by
            ascending order. Default is false.
        name(str, optional): The default value is None. Normally there is no
            need for user to set this property. For more information, please
            refer to :ref:`api_guide_Name`.

    Returns:
        tuple: A tuple of sorted data Variable(with the same shape and data
        type as input) and the sorted indices(with the same shape as input's
        and with data type int64).

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import numpy as np

            in1 = np.array([[[5,8,9,5],
                            [0,0,1,7],
                            [6,9,2,4]],
                            [[5,2,4,2],
                            [4,7,7,9],
                            [1,7,0,6]]]).astype(np.float32)
            with fluid.dygraph.guard():
                x = fluid.dygraph.to_variable(in1)
                out1 = fluid.layers.argsort(input=x, axis=-1)
                out2 = fluid.layers.argsort(input=x, axis=0)
                out3 = fluid.layers.argsort(input=x, axis=1)
                print(out1[0].numpy())
                # [[[5. 5. 8. 9.]
                #   [0. 0. 1. 7.]
                #   [2. 4. 6. 9.]]
                #  [[2. 2. 4. 5.]
                #   [4. 7. 7. 9.]
                #   [0. 1. 6. 7.]]]
                print(out1[1].numpy())
                # [[[0 3 1 2]
                #   [0 1 2 3]
                #   [2 3 0 1]]
                #  [[1 3 2 0]
                #   [0 1 2 3]
                #   [2 0 3 1]]]
                print(out2[0].numpy())
                # [[[5. 2. 4. 2.]
                #   [0. 0. 1. 7.]
                #   [1. 7. 0. 4.]]
                #  [[5. 8. 9. 5.]
                #   [4. 7. 7. 9.]
                #   [6. 9. 2. 6.]]]
                print(out3[0].numpy())
                # [[[0. 0. 1. 4.]
                #   [5. 8. 2. 5.]
                #   [6. 9. 9. 7.]]
                #  [[1. 2. 0. 2.]
                #   [4. 7. 4. 6.]
                #   [5. 7. 7. 9.]]]
    """
    check_variable_and_dtype(
        input, 'input',
        ['float32', 'float64', 'int16', 'int32', 'int64', 'uint8'], 'argsort')
    helper = LayerHelper("argsort", **locals())
    out = helper.create_variable_for_type_inference(
        dtype=input.dtype, stop_gradient=True)
    ids = helper.create_variable_for_type_inference(
        VarDesc.VarType.INT64, stop_gradient=True)
    helper.append_op(
        type='argsort',
        inputs={'X': input},
        outputs={'Out': out,
                 'Indices': ids},
        attrs={'axis': axis,
               'descending': descending})
    return out, ids


Y
def ones(shape, dtype, force_cpu=False):
    """
    The OP creates a tensor of specified :attr:`shape` and :attr:`dtype`, and fills it with 1.
    Its :attr:`stop_gradient` will be set to True to stop gradient computation.

        shape (tuple|list): Shape of output tensor.
        dtype (np.dtype|core.VarDesc.VarType|str): Data type of output tensor, it supports
            bool, float16, float32, float64, int32 and int64.
        force_cpu (bool, optional): Whether force to store the output tensor in CPU memory.
            If :attr:`force_cpu` is False, the output tensor will be stored in running device memory.
            Default: False.
1040 1041

    Returns:
1042
        Variable: A tensor of data type :attr:`dtype` with shape :attr:`shape` and all elements set to 1.
1043 1044 1045 1046

    Examples:
        .. code-block:: python

1047
          import paddle.fluid as fluid
1048
          data = fluid.layers.ones(shape=[2, 4], dtype='float32') # [[1., 1., 1., 1.], [1., 1., 1., 1.]]
Y
1050 1051 1052 1053
    check_type(shape, 'shape', (list, tuple), 'ones')
    check_dtype(dtype, 'create data type',
                ['bool', 'float16', 'float32', 'float64', 'int32', 'int64'],
                'ones')
    assert reduce(lambda x, y: x * y,
                  shape) > 0, "The shape is invalid: %s." % (str(shape))
    return fill_constant(value=1.0, **locals())


def zeros(shape, dtype, force_cpu=False):
    """
    The OP creates a tensor of specified :attr:`shape` and :attr:`dtype`, and fills it with 0.
    Its :attr:`stop_gradient` will be set to True to stop gradient computation.

    Parameters:
        shape (tuple|list): Shape of output tensor.
        dtype (np.dtype|core.VarDesc.VarType|str): Data type of output tensor, it supports
            bool, float16, float32, float64, int32 and int64.
        force_cpu (bool, optional): Whether force to store the output tensor in CPU memory.
            If :attr:`force_cpu` is False, the output tensor will be stored in running device memory.
            Default: False.

    Returns:
        Variable: A tensor of data type :attr:`dtype` with shape :attr:`shape` and all elements set to 0.

    Examples:
        .. code-block:: python

          import paddle.fluid as fluid
          data = fluid.layers.zeros(shape=[3, 2], dtype='float32') # [[0., 0.], [0., 0.], [0., 0.]]
    """
    check_type(shape, 'shape', (list, tuple), 'zeros')
    check_dtype(dtype, 'create data type',
                ['bool', 'float16', 'float32', 'float64', 'int32', 'int64'],
                'zeros')
    return fill_constant(value=0.0, **locals())


def reverse(x, axis):
    """
    The OP reverses the tensor :attr:`x` along the given :attr:`axis`.

    Parameters:
        x (Variable): A tensor to be reversed, its data type supports bool, float32, float64, int32, int64 and uint8.
        axis (int|tuple|list): A dimension or a set of dimensions of :attr:`x` to reverse. Must be
            in the range [-rank( :attr:`x` ), rank( :attr:`x` )). If it is a tuple or a list, reversing
            will be applied on each axis in the tuple or list.

    Returns:
        Variable: The reversed tensor with the same shape and data type as :attr:`x`.

    Examples:
        .. code-block:: python

          import paddle.fluid as fluid
          import numpy as np
          data = fluid.layers.assign(np.array([[0, 1, 2], [3, 4, 5], [6, 7, 8]], dtype='float32')) # [[0., 1., 2.], [3., 4., 5.], [6., 7., 8.]]
          result1 = fluid.layers.reverse(data, 0) # [[6., 7., 8.], [3., 4., 5.], [0., 1., 2.]]
          result2 = fluid.layers.reverse(data, [0, 1]) # [[8., 7., 6.], [5., 4., 3.], [2., 1., 0.]]
    """
    if isinstance(axis, int):
        axis = [axis]
    helper = LayerHelper("reverse", **locals())
    out = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(
        type='reverse',
        inputs={'X': x},
        outputs={'Out': [out]},
        attrs={'axis': axis})
    return out


def save(x, file_path, overwrite=True):
    """
    Saves a variable as a file.

    Args:
        x(variable): The Tensor/LoDTensor to be saved.
        file_path(str): The file path where the variable will be saved.
        overwrite(bool): Whether to overwrite the given file if it already
            exists. If set to False and the file exists, a runtime
            error will be thrown.
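
    Examples:
        .. code-block:: python

            # A minimal sketch: `w` is assumed to be any variable worth
            # persisting; the file path is an arbitrary example.
            import paddle.fluid as fluid
            w = fluid.layers.create_parameter(shape=[784, 200], dtype='float32')
            fluid.layers.save(w, file_path="./w_param")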
    """
    helper = LayerHelper("save", **locals())
    helper.append_op(
        type="save",
        inputs={"input": x},
        outputs={},
        args={"file_path": file_path,
              "overwrite": overwrite})


def save_combine(x, file_path, overwrite=True):
    """
    Saves a list of variables into a single file.

    Args:
        x(list): A list of Tensor/LoDTensor variables to be saved together in
                 a single file.
        file_path(str): The file path where variables will be saved.
        overwrite(bool): Whether to overwrite the given file if it already
            exists. If set to False and the file exists, a runtime
            error will be thrown.

    Returns:
        There is no return value.

    Examples:

        .. code-block:: python

            import paddle.fluid as fluid
            v1 = fluid.layers.data(name="data1",
                                   shape=(4, 6),
                                   dtype="float32")
            v2 = fluid.layers.data(name="data2",
                                   shape=(6, 8, 4),
                                   dtype="float32")
            fluid.layers.save_combine([v1, v2], file_path="output")
    """
    helper = LayerHelper("save_combine", **locals())
    helper.append_op(
        type="save_combine",
        inputs={"input": x},
        outputs={},
        args={"file_path": file_path,
              "overwrite": overwrite})


def load_combine(out, file_path):
    """
    Loads a list of variables from a single file.

    Args:
        out(list): The list of variables to be read from the disk file.
        file_path(str): The path of the disk file.
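
    Examples:
        .. code-block:: python

            # A minimal sketch: read back the two variables written by the
            # save_combine example above (names, shapes and path are assumptions).
            import paddle.fluid as fluid
            v1 = fluid.layers.data(name="data1", shape=(4, 6), dtype="float32")
            v2 = fluid.layers.data(name="data2", shape=(6, 8, 4), dtype="float32")
            fluid.layers.load_combine([v1, v2], file_path="output")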
    """
    helper = LayerHelper("load_combine", **locals())
    helper.append_op(
        type="load_combine",
        inputs={},
        output={"Out": out},
        args={"file_path": file_path})


def has_inf(x):
    """
    Test if any element of x is an infinity number.

    Args:
       x (Variable): The Tensor/LoDTensor to be checked.

    Returns:
       Variable: The tensor variable storing the output, only a bool value, indicating whether there is an infinity number in x or not.
    
    Examples:
        .. code-block:: python
          
          import paddle.fluid as fluid
          data = fluid.layers.data(name="input", shape=[4, 32, 32], dtype="float32")
          res = fluid.layers.has_inf(data)

    """
    check_type(x, 'x', (Variable), 'has_inf')
    helper = LayerHelper("isinf", **locals())
    out = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(type="isinf", inputs={"X": x}, outputs={"Out": out})
    return out


def has_nan(x):
    """
    Test if any element of x is a NAN.

    Args:
       x (Variable): The Tensor/LoDTensor to be checked.

    Returns:
       Variable: The tensor variable storing the output, only a bool value, indicating whether there is a NAN in x or not.
    
    Examples:
        .. code-block:: python
    
          import paddle.fluid as fluid
          data = fluid.layers.data(name="input", shape=[4, 32, 32], dtype="float32")
          res = fluid.layers.has_nan(data)

    """
    check_type(x, 'x', (Variable), 'has_nan')
    helper = LayerHelper("isnan", **locals())
    out = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(type="isnan", inputs={"X": x}, outputs={"Out": out})
    return out


def isfinite(x):
    """
    Test if any element of x is an infinity/NAN number. If all the elements are finite,
    returns true, else false.

    Args:
       x(variable): The Tensor/LoDTensor to be checked.

    Returns:
        Variable: The tensor variable storing the output, contains a bool value.

    Examples:

        .. code-block:: python

            import paddle.fluid as fluid
            var = fluid.layers.data(name="data",
                                    shape=(4, 6),
                                    dtype="float32")
            out = fluid.layers.isfinite(var)
    """
    check_variable_and_dtype(x, "x", ["float32", "float64", "int32", "int64"],
                             "isfinite")
    helper = LayerHelper("isfinite", **locals())
    out = helper.create_variable_for_type_inference(dtype='bool')
    helper.append_op(type="isfinite", inputs={"X": x}, outputs={"Out": out})
    return out


def range(start, end, step, dtype):
    """
    Return evenly spaced values within a given interval.

    Values are generated within the half-open interval [start, stop) (in other words,
    the interval including start but excluding stop).

    Parameters:
        start(float32 | float64 | int32 | int64 | Variable): Start of interval. The interval includes this value.
            when start is Variable, it is a 1-D Tensor with shape [1].
        end(float32 | float64 | int32 | int64 | Variable): End of interval. The interval does not include this
W
whs 已提交
1286
                                 value, except in some cases where step is not an integer
L
                                 it is a 1-D Tensor with shape [1].
        step(float32 | float64 | int32 | int64 | Variable): Spacing between values. For any output out, this is the
                                  distance between two adjacent values, out[i+1] - out[i].
        dtype(str|core.VarDesc.VarType): the data type of the output tensor, can be float32, float64, int32, int64.

    Returns: a 1-D Tensor of evenly spaced values within the given interval. Its data type is set by dtype.
    
    Return type: Variable

    Examples:

        .. code-block:: python

             import paddle.fluid as fluid
             data = fluid.layers.range(0, 10, 2, 'int32')
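             # start/end/step may also be shape-[1] Tensors; a hedged sketch
             # (the name `start_var` is illustrative):
             start_var = fluid.layers.fill_constant([1], 'int32', 0)
             data = fluid.layers.range(start_var, 10, 2, 'int32')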

    """
    helper = LayerHelper("range", **locals())

    check_dtype(dtype, 'create data type',
                ['float32', 'float64', 'int32', 'int64'], 'range')

    dtype = convert_dtype(dtype)
    if not isinstance(start, Variable):
        start = fill_constant([1], dtype, start)
    elif convert_dtype(start.dtype) != dtype:
        # make sure that start, end, step has the same dtype as
        # `dtype`
        start = cast(x=start, dtype=dtype)

    if not isinstance(end, Variable):
        end = fill_constant([1], dtype, end)
    elif convert_dtype(end.dtype) != dtype:
        end = cast(x=end, dtype=dtype)

    if not isinstance(step, Variable):
        step = fill_constant([1], dtype, step)
    elif convert_dtype(step.dtype) != dtype:
        step = cast(x=step, dtype=dtype)

    out = helper.create_variable_for_type_inference(dtype=start.dtype)

    helper.append_op(
        type='range',
        inputs={'Start': start,
                'End': end,
                'Step': step},
        outputs={'Out': [out]})
    out.stop_gradient = True
    return out


def linspace(start, stop, num, dtype):
    """
    This OP returns a fixed number of evenly spaced values within a given interval.

    Args:
        start(float|Variable): The input :attr:`start` is the start variable of the range. It is a float scalar, \
            or a tensor of shape [1] with input data type float32, float64.
        stop(float|Variable): The input :attr:`stop` is the end variable of the range. It is a float scalar, \
            or a tensor of shape [1] with input data type float32, float64.
        num(int|Variable): The input :attr:`num` is the given number of items in the sequence. It is an int scalar, \
            or a tensor of shape [1] with data type int32.
        dtype(string): The data type of output tensor, it could be 'float32' or 'float64'.

    Returns:
        Variable: The 1-D tensor with a fixed number of evenly spaced values, with data type float32 or float64. \
        The data shape of this tensor is :math:`[num]` . If :attr:`num` is set to 1, the output tensor just has \
        the value of input :attr:`start`.

    Examples:
        .. code-block:: python

             import paddle.fluid as fluid
             data = fluid.layers.linspace(0, 10, 5, 'float32') # [0.0,  2.5,  5.0,  7.5, 10.0]
             data = fluid.layers.linspace(0, 10, 1, 'float32') # [0.0]
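             # start/stop may also be shape-[1] float Tensors; a hedged sketch
             # (the name `start_var` is illustrative):
             start_var = fluid.layers.fill_constant([1], 'float32', 0.0)
             data = fluid.layers.linspace(start_var, 10, 5, 'float32')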

    """
    helper = LayerHelper("linspace", **locals())

    check_type(start, 'start', (Variable, float, int), 'linspace')
    check_type(stop, 'stop', (Variable, float, int), 'linspace')
    check_type(num, 'num', (Variable, float, int), 'linspace')

    if not isinstance(start, Variable):
        start = fill_constant([1], dtype, start)
    else:
        check_variable_and_dtype(start, "start", ["float32", "float64"],
                                 "linspace")

    if not isinstance(stop, Variable):
        stop = fill_constant([1], dtype, stop)
    else:
        check_variable_and_dtype(stop, "stop", ["float32", "float64"],
                                 "linspace")
    if not isinstance(num, Variable):
        num = fill_constant([1], 'int32', num)
    else:
        check_variable_and_dtype(num, "num", ["int32"], "linspace")

    out = helper.create_variable_for_type_inference(dtype=start.dtype)

    helper.append_op(
        type='linspace',
        inputs={'Start': start,
                'Stop': stop,
                'Num': num},
        outputs={'Out': [out]})
    return out


def full_like(input,
              fill_value,
              out=None,
              dtype=None,
              device=None,
              stop_gradient=True,
              name=None):
    """
    **full_like**
    This function creates a tensor filled with `fill_value` which has the same shape and dtype
    as `input`.

    Args:
        input(Variable): The input tensor which specifies shape and data type. The data type can be bool, float16, float32, float64, int32, int64.
        fill_value(bool|float|int): The value to fill the tensor with. Note: this value shouldn't exceed the range of the output data type.
        out(Variable, optional): Optional output which can be any created Variable that meets the requirements to store the result of operation. If out is None, a new Variable will be created to store the result. Default value is None.
        dtype(np.dtype|core.VarDesc.VarType|str, optional): The data type of output. The default value is None, which means the output data type is the same as input.
        device (string, optional): Which device to run the operator. The :attr:`device` must be None, 'cpu', 'gpu'. If :attr:`device` is None, it will be the device that the user set in the paddle program. Default value is None.
        stop_gradient(bool, optional): Indicating if we stop gradient from current(out) Variable. Default value is True.
        name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`
    
    Returns:
        out(Variable): The Tensor variable storing the output.
    
    Examples:
        .. code-block:: python

          import paddle
          import paddle.fluid as fluid
          import numpy as np
          input = fluid.data(name='input', dtype='float32', shape=[2, 3])
          output = fluid.layers.full_like(input, 2.0)
          exe = fluid.Executor(fluid.CPUPlace())
          exe.run(fluid.default_startup_program())
          img=np.array([[1, 2, 3], [4, 5, 6]]).astype(np.float32)
          res = exe.run(fluid.default_main_program(), feed={'input':img}, fetch_list=[output])
          print(res) # [array([[2., 2., 2.], [2., 2., 2.]], dtype=float32)]
    """
    helper = LayerHelper("full_like", **locals())

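    # Resolve the output dtype: fall back to the input's dtype when none is given.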
    var_dtype = None
    if dtype is None:
        var_dtype = input.dtype
    else:
        check_dtype(
            dtype, 'dtype',
            ['bool', 'float16', 'float32', 'float64', 'int32', 'int64'],
            'full_like')
        var_dtype = convert_np_dtype_to_dtype_(dtype)

    if out is None:
        out = helper.create_variable_for_type_inference(dtype=var_dtype)

    helper.append_op(
        type='fill_any_like',
        inputs={'X': [input]},
        attrs={'value': fill_value,
               "dtype": var_dtype},
        outputs={'Out': [out]})
    out.stop_gradient = stop_gradient

    return out


def zeros_like(x, out=None):
    """
    This OP creates a zeros tensor which has the same shape and dtype
    as `x`.

    Args:
        x(Variable): The input tensor which specifies shape and dtype, the input data dtype could be bool, float32, float64, int32, int64.
        out(Variable, optional): If set to :attr:`None` , the op will create a variable as output, the data type and shape of \
            this variable will be the same as input :attr:`x`. If it is a tensor, the data type and shape need to be the same as input :attr:`x`.
            The default value is :attr:`None` .

    Returns:
        Variable: The N-D tensor whose elements depend on the input data type: if the input data type is bool, \
            the output values are False, otherwise they are zeros. The output shape is the same as the input.

    Examples:
        .. code-block:: python

          import paddle.fluid as fluid
          x = fluid.data(name='x', dtype='float32', shape=[3])
          data = fluid.layers.zeros_like(x) # [0.0, 0.0, 0.0]

    """

    check_variable_and_dtype(
        x, "x", ['bool', 'float32', 'float64', 'int32', 'int64'], 'zeros_like')
    helper = LayerHelper("zeros_like", **locals())
    if out is None:
        out = helper.create_variable_for_type_inference(dtype=x.dtype)
    else:
        check_variable_and_dtype(
            out, "out", ['bool', 'float32', 'float64', 'int32', 'int64'],
            'zeros_like')

    helper.append_op(
        type='fill_zeros_like', inputs={'X': [x]}, outputs={'Out': [out]})
    out.stop_gradient = True
    return out


def diag(diagonal):
    """
    This OP creates a square matrix which has diagonal values specified by input :attr:`diagonal`.

    Args:
        diagonal(Variable|numpy.ndarray): The input tensor should be 1D tensor, the input shape is :math:`[ N]` , \
            specifying diagonal values by this input tensor. The input data type should be float32, float64, int32, int64.

    Returns:
        Variable: The tensor variable storing the square matrix, with the diagonal values specified by input :attr:`diagonal`. \
            The output shape is :math:`[N, N]` with two dims, and the output data type is the same as the input data type.

    Examples:
        .. code-block:: python

          # [[3, 0, 0]
          #  [0, 4, 0]
          #  [0, 0, 5] 

          import paddle.fluid as fluid
          import numpy as np
          diagonal = np.arange(3, 6, dtype='int32')
          data = fluid.layers.diag(diagonal)
          # diagonal.shape=(3,) data.shape=(3, 3)

    """
    check_type(diagonal, 'diagonal', (Variable, numpy.ndarray), 'diag')
    check_dtype(diagonal.dtype, 'diagonal',
                ['float32', 'float64', 'int32', 'int64'], 'diag')
    helper = LayerHelper("diag", **locals())

    if not isinstance(diagonal, Variable):
        diagonal = assign(diagonal)

    out = helper.create_variable_for_type_inference(dtype=diagonal.dtype)

    helper.append_op(
        type='diag', inputs={'Diagonal': [diagonal]}, outputs={'Out': [out]})

    out.stop_gradient = True
    return out


def eye(num_rows, num_columns=None, batch_shape=None, dtype='float32'):
    """
    **eye**

    This function constructs an identity tensor, or a batch of tensor.

    Args:
        num_rows(int): the number of rows in each batch tensor.
        num_columns(int): the number of columns in each batch tensor.
                          If None, default: num_rows.
        batch_shape(list(int)): If provided, the returned tensor will have a leading
                                batch size of this shape.
        dtype(string): The data type of the returned tensor.
                       It should be int32, int64, float16, float32, float64.

    Returns:
        Variable: An identity Tensor or LoDTensor of shape batch_shape + [num_rows, num_columns].

    Examples:
        .. code-block:: python

          import paddle.fluid as fluid
          data = fluid.layers.eye(3, dtype='int32')
          # [[1, 0, 0]
          #  [0, 1, 0]
          #  [0, 0, 1]]

          data = fluid.layers.eye(2, 3, dtype='int32')
          # [[1, 0, 0]
          #  [0, 1, 0]]

          data = fluid.layers.eye(2, batch_shape=[3])
          # Construct a batch of 3 identity tensors, each 2 x 2.
          # data[i, :, :] is a 2 x 2 identity tensor, i = 0, 1, 2.

    """

    helper = LayerHelper("eye", **locals())
    if not isinstance(num_rows, int) or num_rows < 0:
        raise TypeError("num_rows should be a non-negative int")
    if num_columns is not None:
        if not isinstance(num_columns, int) or num_columns < 0:
            raise TypeError("num_columns should be a non-negative int")
    else:
        num_columns = num_rows
    out = helper.create_variable_for_type_inference(dtype=dtype)
    c_dtype = convert_np_dtype_to_dtype_(dtype)
    helper.append_op(
        type='eye',
        inputs={},
        outputs={'Out': [out]},
        attrs={
            'num_rows': num_rows,
            'num_columns': num_columns,
            'dtype': c_dtype
        },
        stop_gradient=True)
    out.stop_gradient = True

    if batch_shape is not None:
        if not isinstance(batch_shape, list):
            raise TypeError("batch_shape should be a list")
        from .nn import stack
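        # Build the batched identity by stacking copies of `out` along a new
        # leading axis, iterating the batch dims from innermost to outermost.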
        for batch_val in reversed(batch_shape):
            if batch_val <= 0:
                raise TypeError("batch_shape should be a positive int list")
            else:
                stack_vars = [out for _ in numpy.arange(batch_val)]
                out = stack(stack_vars, axis=0)
    return out


def ones_like(x, out=None):
    """
    **ones_like**

    This function creates a ones tensor which has the same shape and dtype
    as `x`.

    Args:
        x(Variable): The input tensor which specifies shape and dtype.
        out(Variable): The output tensor.

    Returns:
        out(Variable): The tensor variable storing the output.

    Examples:
        .. code-block:: python

          import paddle.fluid as fluid

          x = fluid.layers.data(name='x', dtype='float32', shape=[3], append_batch_size=False)
          data = fluid.layers.ones_like(x) # [1.0, 1.0, 1.0]

    """
    check_variable_and_dtype(
        x, "x", ['bool', 'float32', 'float64', 'int32', 'int64'], 'ones_like')

    helper = LayerHelper("ones_like", **locals())
    if out is None:
        out = helper.create_variable_for_type_inference(dtype=x.dtype)
    else:
        check_variable_and_dtype(
            out, "out", ['bool', 'float32', 'float64', 'int32', 'int64'],
            'ones_like')
    helper.append_op(
        type='fill_any_like',
        inputs={'X': [x]},
        attrs={'value': 1.0},
        outputs={'Out': [out]})
    return out


def arange(start, end, step=1, dtype=None, name=None):
    """
    Return evenly spaced values within a given interval.
    Values are generated within the half-open interval [start, stop) (in other words,
    the interval including start but excluding stop).
    Parameters:
        start(float32 | float64 | int32 | int64 | Variable): Start of interval. The interval includes this value.
            when start is Variable, it is a 1-D Tensor with shape [1].
        end(float32 | float64 | int32 | int64 | Variable): End of interval. The interval does not include this
                                 value, except in some cases where step is not an integer
                                 and floating point round-off affects the length of out. When end is Variable,
                                 it is a 1-D Tensor with shape [1].
        step(float32 | float64 | int32 | int64 | Variable): Spacing between values. For any output out, this is the
                                  distance between two adjacent values, out[i+1] - out[i].
        dtype(str|core.VarDesc.VarType): the data type of the output tensor, can be float32, float64, int32, int64.
    Returns: a 1-D Tensor which is evenly spaced values within a given interval. Its data type is set by dtype.
    
    Return type: Variable
    examples:
        .. code-block:: python
             import paddle.fluid as fluid
             # expected out put: [0, 2, 4, 6, 8]
             data = fluid.layers.arange(0, 10, 2, 'int32')
         #dygraph mode
             import paddle.fluid as fluid
             with fluid.dygraph.guard():
                 x = fluid.layers.arange(0, 6, 2) 
                 # x: [0, 2, 4]
                 # x dtype: float32
             
    """
    helper = LayerHelper("range", **locals())

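    # Unlike `range` above, `arange` makes dtype optional and falls back to float32.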
    if dtype is None:
        dtype = 'float32'

    check_dtype(dtype, 'create data type',
                ['float32', 'float64', 'int32', 'int64'], 'range')

    dtype = convert_dtype(dtype)
    if not isinstance(start, Variable):
        start = fill_constant([1], dtype, start)

    if not isinstance(end, Variable):
        end = fill_constant([1], dtype, end)

    if not isinstance(step, Variable):
        step = fill_constant([1], dtype, step)

    out = helper.create_variable_for_type_inference(dtype=start.dtype)

    helper.append_op(
        type='range',
        inputs={'Start': start,
                'End': end,
                'Step': step},
        outputs={'Out': [out]})
    out.stop_gradient = True
    return out


def full(shape,
         fill_value,
         out=None,
         dtype=None,
         device=None,
         stop_gradient=True,
         name=None):
    """
    This Op returns a Tensor filled with the `fill_value`, whose shape is specified by `shape`.
    
    Args:
        shape(list|tuple|Variable): Shape of the Tensor to be created.
                The data type is ``int32`` or ``int64`` . If ``shape`` is a list or tuple,
                the elements of it should be integers or Tensors with shape [1].
                If ``shape`` is a Variable, it should be a 1-D Tensor.
        fill_value(bool|float16|float32|float64|int32|int64|Variable): The constant value
            used to initialize the Tensor to be created. If fill_value is a Variable, it must be a 1-D Tensor.
        out(Variable, optional): Optional output which can be any created 
            Variable that meets the requirements to store the result of operation.
            If out is None, a new Variable will be created to store the result.
        dtype(np.dtype|core.VarDesc.VarType|str, optional): Data type of the output tensor
            which can be float16, float32, float64, int32, int64. If dtype is `None`, the data
            type of the created tensor is `float32`.
        device(str, optional): On which device to run this Op. The :attr:`device` must be
            None, 'cpu' or 'gpu'. If :attr:`device` is None, the device that the user set in 
            the paddle program will be chosen. Default value is None.
        stop_gradient(bool, optional): Indicating if we stop gradient from current(out) Variable,
            default value is True.
        name(str, optional): The default value is None.  Normally there is no need for user to set this
            property.  For more information, please refer to :ref:`api_guide_Name`.
    
    Returns:
        Variable: Tensor which is created according to shape and dtype.

    Raises:
        TypeError: The `dtype` must be one of None, bool, float16, float32, float64, int32 and int64.
        TypeError: The `out` must be a Variable.
        TypeError: The `shape` must be one of Variable, list or tuple.
    
    Examples:
        .. code-block:: python

          import paddle.fluid as fluid

          data1 = fluid.layers.full(shape=[2,1], fill_value=0, dtype='int64') # data1=[[0],[0]]
          data2 = fluid.layers.full(shape=[2,1], fill_value=5, dtype='int64', device='gpu') # data2=[[5],[5]]

          # attr shape is a list which contains Variable Tensor.
          positive_2 = fluid.layers.fill_constant([1], "int32", 2)
          data3 = fluid.layers.full(shape=[1, positive_2], dtype='float32', fill_value=1.5) # data3=[1.5, 1.5]

          # attr shape is a Variable Tensor.
          shape = fluid.layers.fill_constant([1,2], "int32", 2) # shape=[2,2]
          data4 = fluid.layers.full(shape=shape, dtype='bool', fill_value=True) # data4=[[True,True],[True,True]]
          
          # attr fill_value is a Variable Tensor.
          val = fluid.layers.fill_constant([1], "float32", 2.0) # val=[2.0]
          data5 = fluid.layers.full(shape=[2,1], fill_value=val, dtype='float32') #data5=[[2.0],[2.0]]
    """

    helper = LayerHelper("full", **locals())

    if dtype is None:
        dtype = 'float32'

    check_dtype(dtype, 'create data type',
                ['bool', 'float16', 'float32', 'float64', 'int32', 'int64'],
                'full')
    check_type(shape, 'shape', (Variable, list, tuple), 'full')
    if out is not None:
        check_type(out, 'out', Variable, 'full')

    if out is None:
        out = helper.create_variable_for_type_inference(dtype=dtype)

    out.stop_gradient = stop_gradient

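    # `full` is a thin wrapper: it pins the kernel to `device` (when given) and
    # delegates the actual fill to fill_constant.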
    with device_guard(device):
        out = fill_constant(shape=shape, dtype=dtype, value=fill_value, out=out)

    return out


def _tril_triu_op(helper):
    """Base op of tril_op and triu_op
    """
    op_type = helper.layer_type
    x = helper.kwargs.get('input', None)

    assert x is not None, 'x cannot be None in {}'.format(op_type)
    check_variable_and_dtype(x, 'x', ['float32', 'float64', 'int32', 'int64'],
                             op_type)
    if len(x.shape) < 2:
        raise ValueError("input shape in {} must be at least 2-D".format(
            op_type))
    diagonal = helper.kwargs.get('diagonal', 0)
    if not isinstance(diagonal, (int, )):
        raise TypeError("diagonal in {} must be a python Int".format(op_type))
    name = helper.kwargs.get('name', None)

    if name is None:
        out = helper.create_variable_for_type_inference(dtype=x.dtype)
    else:
        out = helper.create_variable(
            name=name, dtype=x.dtype, persistable=False)

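    # A single tril_triu kernel implements both ops; the boolean `lower`
    # attribute selects which triangle of the input is kept.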
    helper.append_op(
        type="tril_triu",
        inputs={"X": x},
        attrs={
            "diagonal": diagonal,
            "lower": True if op_type == 'tril' else False,
        },
        outputs={"Out": out}, )

    return out


def tril(input, diagonal=0, name=None):
    """
    This op returns the lower triangular part of a matrix (2-D tensor) or batch
    of matrices :attr:`input`, the other elements of the result tensor are set 
    to 0. The lower triangular part of the matrix is defined as the elements 
    on and below the diagonal.

    Args:
        input (Variable): The input variable which is a Tensor.
            Support data types: ``float64``, ``float32``, ``int32``, ``int64``.
        diagonal (int, optional): The diagonal to consider, default value is 0.
            If :attr:`diagonal` = 0, all elements on and below the main diagonal are
            retained. A positive value includes just as many diagonals above the main
            diagonal, and similarly a negative value excludes just as many diagonals below
            the main diagonal. The main diagonal are the set of indices
            :math:`\{(i, i)\}` for :math:`i \in [0, \min\{d_{1}, d_{2}\} - 1]` where
            :math:`d_{1}, d_{2}` are the dimensions of the matrix.
        name (str, optional): The default value is None. Normally there is no need for
            user to set this property. For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Variable: Tensor, results of lower triangular operation by the specified diagonal of input tensor,
        it's data type is the same as input's Tensor.

    Raises:
        TypeError: diagonal is not a int type.
        ValueError: dimension of :attr:`input` is less than 2.

    Examples:
        .. code-block:: python

            import numpy as np
            import paddle.fluid as fluid

            data = np.arange(1, 13, dtype="int64").reshape(3,-1)
            # array([[ 1,  2,  3,  4],
            #        [ 5,  6,  7,  8],
            #        [ 9, 10, 11, 12]])
            x = fluid.data(shape=(-1, 4), dtype='int64', name='x')
            exe = fluid.Executor(fluid.CPUPlace())

            # example 1, default diagonal
            tril = fluid.layers.tril(x)
            tril_out, = exe.run(fluid.default_main_program(), feed={"x": data},
                fetch_list=[tril], return_numpy=True)
            # array([[ 1,  0,  0,  0],
            #        [ 5,  6,  0,  0],
            #        [ 9, 10, 11,  0]])

        .. code-block:: python

            # example 2, positive diagonal value
            import paddle.fluid as fluid
            import numpy as np

            data = np.arange(1, 13, dtype="int64").reshape(3,-1)
            x = fluid.data(shape=(-1, 4), dtype='int64', name='x')
            exe = fluid.Executor(fluid.CPUPlace())

            tril = fluid.layers.tril(x, diagonal=2)
            tril_out, = exe.run(fluid.default_main_program(), feed={"x": data},
                fetch_list=[tril], return_numpy=True)
            # array([[ 1,  2,  3,  0], 
            #        [ 5,  6,  7,  8],
            #        [ 9, 10, 11, 12]])

        .. code-block:: python

            # example 3, negative diagonal value
            import paddle.fluid as fluid
            import numpy as np

            data = np.arange(1, 13, dtype="int64").reshape(3,-1)
            x = fluid.data(shape=(-1, 4), dtype='int64', name='x')
            exe = fluid.Executor(fluid.CPUPlace())

            tril = fluid.layers.tril(x, diagonal=-1)
            tril_out, = exe.run(fluid.default_main_program(), feed={"x": data},
                fetch_list=[tril], return_numpy=True)
            # array([[ 0,  0,  0,  0],
            #        [ 5,  0,  0,  0],
            #        [ 9, 10,  0,  0]])

   """

    return _tril_triu_op(LayerHelper('tril', **locals()))


def triu(input, diagonal=0, name=None):
    """
    This op returns the upper triangular part of a matrix (2-D tensor) or batch of matrices
    :attr:`input`, the other elements of the result tensor are set to 0.
    The upper triangular part of the matrix is defined as the elements on and
    above the diagonal.

    Args:
        input (Variable): The input variable which is a Tensor.
            Support data types: ``float64``, ``float32``, ``int32``, ``int64``.
        diagonal (int, optional): The diagonal to consider, default value is 0.
            If :attr:`diagonal` = 0, all elements on and above the main diagonal are
            retained. A positive value excludes just as many diagonals above the main
            diagonal, and similarly a negative value includes just as many diagonals below
            the main diagonal. The main diagonal are the set of indices
            :math:`\{(i, i)\}` for :math:`i \in [0, \min\{d_{1}, d_{2}\} - 1]` where
            :math:`d_{1}, d_{2}` are the dimensions of the matrix.
        name (str, optional): The default value is None. Normally there is no need for
            user to set this property. For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Variable: Tensor, results of upper triangular operation by the specified diagonal of input tensor,
        it's data type is the same as input's Tensor.

    Raises:
        TypeError: diagonal is not a int type.
        ValueError: dimension of :attr:`input` is less than 2.

    Examples:
        .. code-block:: python

            import numpy as np
            import paddle.fluid as fluid

            data = np.arange(1, 13, dtype="int64").reshape(3,-1)
            # array([[ 1,  2,  3,  4],
            #        [ 5,  6,  7,  8],
            #        [ 9, 10, 11, 12]])
            x = fluid.data(shape=(-1, 4), dtype='int64', name='x')
            exe = fluid.Executor(fluid.CPUPlace())

            # example 1, default diagonal
            triu = fluid.layers.triu(x)
            triu_out, = exe.run(fluid.default_main_program(), feed={"x": data},
                fetch_list=[triu], return_numpy=True)
            # array([[ 1,  2,  3,  4],
            #        [ 0,  6,  7,  8],
            #        [ 0,  0, 11, 12]])

        .. code-block:: python

            # example 2, positive diagonal value
            import paddle.fluid as fluid
            import numpy as np

            data = np.arange(1, 13, dtype="int64").reshape(3,-1)
            x = fluid.data(shape=(-1, 4), dtype='int64', name='x')
            exe = fluid.Executor(fluid.CPUPlace())

            triu = fluid.layers.triu(x, diagonal=2)
            triu_out, = exe.run(fluid.default_main_program(), feed={"x": data},
                fetch_list=[triu], return_numpy=True)
            # array([[0, 0, 3, 4],
            #        [0, 0, 0, 8],
            #        [0, 0, 0, 0]])

        .. code-block:: python

            # example 3, negative diagonal value
            import paddle.fluid as fluid
            import numpy as np

            data = np.arange(1, 13, dtype="int64").reshape(3,-1)
            x = fluid.data(shape=(-1, 4), dtype='int64', name='x')
            exe = fluid.Executor(fluid.CPUPlace())

            triu = fluid.layers.triu(x, diagonal=-1)
            triu_out, = exe.run(fluid.default_main_program(), feed={"x": data},
                fetch_list=[triu], return_numpy=True)
            # array([[ 1,  2,  3,  4],
            #        [ 5,  6,  7,  8],
            #        [ 0, 10, 11, 12]])

    """

    return _tril_triu_op(LayerHelper('triu', **locals()))


@templatedoc(op_type="kron")
def kron(x, y, out=None, name=None):
    """${comment}

    Args:
        x (Variable): the first operand of kron op, data type: float16, float32,
            float64, int32 or int64.
        y (Variable): the second operand of kron op, data type: float16, 
            float32, float64, int32 or int64. Its data type should be the same 
            with x.
        out (Variable, optional): Optional output which can be any created 
            Variable that meets the requirements to store the result of 
            operation. If out is None, a new Variable will be created to store
            the result. Defaults to None.
        name(str, optional): The default value is None.  Normally there is no 
            need for user to set this property.  For more information, please 
            refer to :ref:`api_guide_Name`.

    Returns:
        Variable: The output of kron op, data type: float16, float32, float64, int32 or int64. Its data type is the same as x.

    Examples:
        .. code-block:: python
        
          import paddle
          from paddle import fluid
          import paddle.fluid.dygraph as dg
          import numpy as np

          a = np.arange(1, 5).reshape(2, 2).astype(np.float32)
          b = np.arange(1, 10).reshape(3, 3).astype(np.float32)

          place = fluid.CPUPlace()
          with dg.guard(place):
              a_var = dg.to_variable(a)
              b_var = dg.to_variable(b)
              c_var = fluid.layers.kron(a_var, b_var)
              c_np = c_var.numpy()
          print(c_np)

          #[[ 1.  2.  3.  2.  4.  6.]
          # [ 4.  5.  6.  8. 10. 12.]
          # [ 7.  8.  9. 14. 16. 18.]
          # [ 3.  6.  9.  4.  8. 12.]
          # [12. 15. 18. 16. 20. 24.]
          # [21. 24. 27. 28. 32. 36.]]
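          # The result of kron on a (2, 2) and a (3, 3) operand has shape
          # (2*3, 2*3) = (6, 6).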
    """
    if in_dygraph_mode():
        return core.ops.kron(x, y)

    helper = LayerHelper('kron', **locals())
    check_variable_and_dtype(
        x, 'x', ['float16', 'float32', 'float64', 'int32', 'int64'], 'kron')
    check_variable_and_dtype(
        y, 'y', ['float16', 'float32', 'float64', 'int32', 'int64'], 'kron')

    if out is None:
        out = helper.create_variable_for_type_inference(dtype=x.dtype)
    else:
        check_variable_and_dtype(
            out, 'out', ['float16', 'float32', 'float64', 'int32', 'int64'],
            'kron')
    helper.append_op(type="kron", inputs={"X": x, "Y": y}, outputs={"Out": out})
    return out