#   Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import math
import numpy
import warnings

from ..layer_helper import LayerHelper
from ..param_attr import ParamAttr
from ..initializer import Initializer
from ..framework import (
    _current_expected_place,
    convert_np_dtype_to_dtype_,
    _non_static_mode,
    _varbase_creator,
    device_guard,
    _in_legacy_dygraph,
    in_dygraph_mode,
    _get_paddle_place,
)
from ..framework import Variable
from ..initializer import Constant
from ..core import VarDesc
from .. import core
from .layer_function_generator import templatedoc
from . import utils
from ..data_feeder import (
    check_variable_and_dtype,
    check_type,
    check_dtype,
    convert_dtype,
)
from paddle.utils import deprecated

from .utils import check_shape
from paddle import _C_ops, _legacy_C_ops

__all__ = [
    'create_global_var',
    'cast',
    'tensor_array_to_tensor',
    'concat',
    'sums',
    'assign',
    'fill_constant_batch_size_like',
    'fill_constant',
    'argmin',
    'argmax',
    'zeros',
]


def create_global_var(
    shape, value, dtype, persistable=False, force_cpu=False, name=None
):
    """
    This function creates a new tensor variable with value in the global block (block 0).

    Parameters:
        shape (list[int]|tuple[int]): Shape of the variable
        value (float): The value of the variable. The newly created
                      variable will be filled with it.
        dtype (str): Data type of the variable
        persistable (bool, optional): If this variable is persistable.
                           Default: False
        force_cpu (bool, optional): Force this variable to be on CPU.
                         Default: False
        name (str, optional): For detailed information, please refer to
           :ref:`api_guide_Name` . Usually the name does not need to be set and is None by default.

    Returns:
        Variable: The created Variable

    Examples:
        .. code-block:: python

            import paddle
            paddle.enable_static()
            var = paddle.static.create_global_var(shape=[2,3], value=1.0, dtype='float32',
                                           persistable=True, force_cpu=True, name='new_var')
    """
    check_type(
        shape, 'shape', (list, tuple, numpy.ndarray), 'create_global_var'
    )
    for item in shape:
        check_type(
            item,
            'item of shape',
            (
                int,
                numpy.uint8,
                numpy.int8,
                numpy.int16,
                numpy.int32,
                numpy.int64,
            ),
            'create_global_var',
        )

    check_dtype(
        dtype,
        'dtype',
        [
            'bool',
            'float16',
            'float32',
            'float64',
            'int8',
            'int16',
            'int32',
            'int64',
            'uint8',
            'uint16',
        ],
        'create_global_var',
    )

    helper = LayerHelper("global_var", **locals())
    var = helper.create_global_variable(
        dtype=dtype,
        shape=shape,
        persistable=persistable,
        name=name,
        stop_gradient=True,
    )
    helper.set_variable_initializer(
        var, initializer=Constant(value=float(value), force_cpu=force_cpu)
    )

    return var


def cast(x, dtype):
    """

    This OP takes in the Tensor :attr:`x` with :attr:`x.dtype` and casts it
    to the output with :attr:`dtype`. It's meaningless if the output dtype
    equals the input dtype, but it's fine if you do so.

    Args:
        x(Tensor): An input N-D Tensor with data type bool, float16,
            float32, float64, int32, int64, uint8.
        dtype(np.dtype|str): Data type of the output:
            bool, float16, float32, float64, int8, int32, int64, uint8.

    Returns:
        Tensor: A Tensor with the same shape as input's.

    Examples:
        .. code-block:: python

            import paddle

            x = paddle.to_tensor([2, 3, 4], 'float64')
            y = paddle.cast(x, 'uint8')
    """
    if in_dygraph_mode():
        if not isinstance(dtype, core.VarDesc.VarType):
            dtype = convert_np_dtype_to_dtype_(dtype)
        return _C_ops.cast(x, dtype)

    if _non_static_mode():
        if not isinstance(dtype, core.VarDesc.VarType):
            dtype = convert_np_dtype_to_dtype_(dtype)
        out = _legacy_C_ops.cast(x, 'in_dtype', x.dtype, 'out_dtype', dtype)
        return out

    check_variable_and_dtype(
        x,
        'x',
        [
            'bool',
            'float16',
            'float32',
            'float64',
            'int16',
            'int32',
            'int64',
            'uint8',
            'uint16',
        ],
        'cast',
    )
    check_dtype(
        dtype,
        'dtype',
        [
            'bool',
            'float16',
            'float32',
            'float64',
            'int8',
            'int16',
            'int32',
            'int64',
            'uint8',
            'uint16',
        ],
        'cast',
    )

    helper = LayerHelper('cast', **locals())
    out = helper.create_variable_for_type_inference(
        dtype=dtype, stop_gradient=x.stop_gradient
    )
    helper.append_op(
        type='cast',
        inputs={'X': [x]},
        outputs={'Out': [out]},
        attrs={'in_dtype': x.dtype, 'out_dtype': out.dtype},
    )
    return out


def concat(input, axis=0, name=None):
    """
    This OP concatenates the input along the axis.

    Args:
        input(list|tuple|Tensor): ``input`` can be a Tensor, or a Tensor list or Tensor tuple with data type
            bool, float16, float32, float64, int32, int64. All the Tensors in ``input`` must have the same data type.
        axis(int|Tensor, optional): Specify the axis to operate on the input Tensors.
            It's a scalar with data type int or a Tensor with shape [1] and data type int32 or int64.
            The effective range is [-R, R), where R is Rank(x). When ``axis < 0``, it works the same way
            as ``axis+R``. Default is 0.
        name (str, optional): The default value is None. Normally there is no
            need for user to set this property. For more information, please
            refer to :ref:`api_guide_Name`.

    Returns:
        Tensor: A Tensor with the same data type as ``input``.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import numpy as np

            in1 = np.array([[1, 2, 3],
                            [4, 5, 6]])
            in2 = np.array([[11, 12, 13],
                            [14, 15, 16]])
            in3 = np.array([[21, 22],
                            [23, 24]])
            with fluid.dygraph.guard():
                x1 = fluid.dygraph.to_variable(in1)
                x2 = fluid.dygraph.to_variable(in2)
                x3 = fluid.dygraph.to_variable(in3)
                # When the axis is negative, the real axis is (axis + Rank(x)).
                # As follows, axis is -1, Rank(x) is 2, the real axis is 1
                out1 = fluid.layers.concat(input=[x1, x2, x3], axis=-1)
                out2 = fluid.layers.concat(input=[x1, x2], axis=0)
                print(out1.numpy())
                # [[ 1  2  3 11 12 13 21 22]
                #  [ 4  5  6 14 15 16 23 24]]
                print(out2.numpy())
                # [[ 1  2  3]
                #  [ 4  5  6]
                #  [11 12 13]
                #  [14 15 16]]
    """

    if in_dygraph_mode():
        if isinstance(axis, Variable):
            axis = axis.numpy()
            axis = axis.item(0)
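        # Tensors whose shape contains a 0 are empty; filter them out of the
        # input list before calling the concat kernel.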
        if not isinstance(input, Variable):
            input = [t for t in input if t.shape.count(0) == 0]
        out = _C_ops.concat(input, axis)
        return out

    if _in_legacy_dygraph():
        if isinstance(axis, Variable):
            axis = axis.numpy()
            axis = axis.item(0)
        if not isinstance(input, Variable):
            input = [t for t in input if t.shape.count(0) == 0]
        out = _varbase_creator()
        _legacy_C_ops.concat(input, out, 'axis', axis)
        return out

    check_type(input, 'input', (list, tuple, Variable), 'concat')
    if not isinstance(input, Variable):
        for id, x in enumerate(input):
            check_variable_and_dtype(
                x,
                'input[' + str(id) + ']',
                ['bool', 'float16', 'float32', 'float64', 'int32', 'int64'],
                'concat',
            )
            if x.dtype != input[0].dtype:
                raise TypeError(
                    "All the Tensors in the input must have the same data type."
                )
    else:
        input = [input]
    check_type(axis, 'axis', (int, Variable), 'concat')

    if isinstance(axis, Variable):
        check_dtype(
            axis.dtype,
            'axis',
            ['int32', 'int64'],
            'concat',
            "The data type of axis must be int32 or int64 when axis is a Tensor",
        )

    helper = LayerHelper('concat', **locals())
    out = helper.create_variable_for_type_inference(dtype=helper.input_dtype())

    if input[0].desc.type() == core.VarDesc.VarType.LOD_TENSOR_ARRAY:
        # NOTE(liym27): Don't remove this if branch!
        # This feature is supported for Dynamic-to-Static, because after transformed, the type of inputs[0]
        # is LOD_TENSOR_ARRAY in some scenarios. And this feature can be used in static mode.

        assert len(input) == 1, (
            "If the elements of 'input' in concat are Variable(LoDTensorArray), "
            "number of the elements must be 1, but received %s." % len(input)
        )
        out_index = helper.create_variable_for_type_inference(dtype="int32")
        helper.append_op(
            type='tensor_array_to_tensor',
            inputs={'X': input[0]},
            outputs={'Out': [out], 'OutIndex': [out_index]},
            attrs={'axis': axis, 'use_stack': False},
        )
    else:
        inputs = {'X': input}
        attrs = {}
        if isinstance(axis, Variable):
            axis.stop_gradient = True
        attrs['axis'] = axis

        helper.append_op(
            type='concat', inputs=inputs, outputs={'Out': [out]}, attrs=attrs
        )
    return out


def tensor_array_to_tensor(input, axis=1, name=None, use_stack=False):
    r"""
    This function concatenates or stacks all tensors in the input LoDTensorArray
    along the axis mentioned and returns that as the output.

    For Example:

    .. code-block:: text

        Case 1:

            Given:

                input.data = {[[0.6, 0.1, 0.3],
                               [0.5, 0.3, 0.2]],
                              [[1.3],
                               [1.8]],
                              [[2.3, 2.1],
                               [2.5, 2.4]]}

                axis = 1, use_stack = False

            Then:

                output.data = [[0.6, 0.1, 0.3, 1.3, 2.3, 2.1],
                               [0.5, 0.3, 0.2, 1.8, 2.5, 2.4]]

                output_index.data = [3, 1, 2]

        Case 2:

            Given:

                input.data = {[[0.6, 0.1],
                               [0.5, 0.3]],
                              [[0.3, 1.3],
                               [0.2, 1.8]],
                              [[2.3, 2.1],
                               [2.5, 2.4]]}

                axis = 1, use_stack = True

            Then:

                output.data = [[[0.6, 0.1]
                                [0.3, 1.3]
                                [2.3, 2.1],
                               [[0.5, 0.3]
                                [0.2, 1.8]
                                [2.5, 2.4]]]

                output_index.data = [2, 2, 2]

    Args:
        input(Variable): A LoDTensorArray variable.
        axis(int): The axis along which the tensors in :attr:`input` will be
            concatenated or stacked.
        name(str|None): A name for this layer (optional). If set None, the layer
                       will be named automatically.
        use_stack(bool): Act as concat_op or stack_op. For stack mode, all
            tensors in the tensor array must have the same shape.

    Returns:
        Variable: The concatenated or stacked tensor variable.
        Variable: A 1-D tensor variable with int32 data type. The data in this \
            tensor contains the sizes of the input tensors along the given axis.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import numpy as np
            x0 = fluid.layers.assign(np.random.rand(2, 2).astype("float32"))
            x1 = fluid.layers.assign(np.random.rand(2, 2).astype("float32"))
            i = fluid.layers.fill_constant(shape=[1], dtype="int64", value=0)
            array = fluid.layers.create_array(dtype='float32')
            fluid.layers.array_write(x0, i, array)
            fluid.layers.array_write(x1, i + 1, array)
            output, output_index = fluid.layers.tensor_array_to_tensor(input=array)
    """
    if _non_static_mode():
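        # In dynamic mode the tensor "array" is a plain Python list, so
        # stack/concat applies directly and the per-tensor sizes along
        # `axis` can be computed in Python.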
        assert isinstance(
            input, list
        ), "The 'input' in tensor_array_to_tensor must be list"
        from .nn import concat
        from ..dygraph import to_variable
        from paddle import stack

        op = stack if use_stack else concat
        res = op(input, axis=axis)
        sizes = to_variable(
            numpy.array(list(map(lambda x: int(x.shape[axis]), input)))
        )
        return res, sizes

    check_type(input, 'input', (list, Variable), 'tensor_array_to_tensor')
    if isinstance(input, list):
        for i, input_x in enumerate(input):
            check_type(
                input_x,
                'input[' + str(i) + ']',
                Variable,
                'tensor_array_to_tensor',
            )
    helper = LayerHelper('tensor_array_to_tensor', **locals())
    out = helper.create_variable_for_type_inference(dtype=helper.input_dtype())
    out_index = helper.create_variable_for_type_inference(dtype="int32")
    helper.append_op(
        type='tensor_array_to_tensor',
        inputs={'X': input},
        outputs={'Out': [out], 'OutIndex': [out_index]},
        attrs={'axis': axis, 'use_stack': use_stack},
    )
    return out, out_index


def sums(input, out=None):
    r"""
    This function computes the sum of multiple input Tensors elementwisely.

    - Case 1, sum of 3 Tensors

    .. code-block:: text

        # Input Tensors
        x0.shape = [2, 3]
        x0.data = [[1., 2., 3.],
                   [4., 5., 6.]]
        x1.shape = [2, 3]
        x1.data = [[10., 20., 30.],
                   [40., 50., 60.]]
        x2.shape = [2, 3]
        x2.data = [[100., 200., 300.],
                   [400., 500., 600.]]

        # Output Tensor
        out.shape = [2, 3]
        out.data = [[111., 222., 333.],
                    [444., 555., 666.]]

    Args:
        input (list): A list of Variables which hold input Tensors with the same
            data type and shape. Optional data types are: float32, float64, int32, int64.
        out (Variable, optional): Output Tensor. It can be any existing Variable.
            The default value is None, then a new Variable will be created and returned.

    Returns:
        Variable: The sum of inputs. The shape and data type are the same as the input. \
            If :code:`out` is not None, the returned value is :code:`out` .

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid

            x0 = fluid.layers.fill_constant(shape=[16, 32], dtype='int64', value=1)
            x1 = fluid.layers.fill_constant(shape=[16, 32], dtype='int64', value=2)
            x2 = fluid.layers.fill_constant(shape=[16, 32], dtype='int64', value=3)
            x3 = fluid.layers.fill_constant(shape=[16, 32], dtype='int64', value=0)

            # Sum of multiple Tensors, the result is stored to a new Variable sum0 (sum0=x0+x1+x2, the value is [[6, ..., 6], ..., [6, ..., 6]])
            sum0 = fluid.layers.sums(input=[x0, x1, x2])

            # Sum of multiple Tensors, sum1 and x3 represent the same Variable (x3=x0+x1+x2, the value is [[6, ..., 6], ..., [6, ..., 6]])
            sum1 = fluid.layers.sums(input=[x0, x1, x2], out=x3)
    """
    check_type(input, 'input', (Variable, tuple, list), 'sums')
    if isinstance(input, (list, tuple)):
        for input_section in input:
            check_variable_and_dtype(
                input_section,
                "input",
                ['float16', 'float32', 'float64', 'int32', 'int64'],
                'sums',
            )
    else:
        check_variable_and_dtype(
            input,
            "input",
            ['float16', 'float32', 'float64', 'int32', 'int64'],
            'sums',
        )

    helper = LayerHelper('sum', **locals())
    if out is None:
        out = helper.create_variable_for_type_inference(
            dtype=helper.input_dtype()
        )
    else:
        check_variable_and_dtype(
            out, "out", ['float32', 'float64', 'int32', 'int64'], 'sums'
        )

    helper.append_op(
        type='sum',
        inputs={'X': input},
        outputs={'Out': out},
        attrs={'use_mkldnn': False},
    )
    return out


def assign(input, output=None):
    """

    The OP copies the :attr:`input` to the :attr:`output`.

    Parameters:
        input (Tensor|numpy.ndarray|list|tuple|scalar): A tensor, numpy ndarray, tuple/list of scalar,
            or scalar. Its data type supports float16, float32, float64, int32, int64, and bool.
            Note: the float64 data will be converted to float32 because of current platform protobuf
            data limitation.
        output (Tensor, optional): A tensor. If :attr:`output` is None, a new tensor will
            be created as :attr:`output`. Default: None.

    Returns:
        Tensor: A tensor with the same shape, data type and value as :attr:`input`.

    Examples:
        .. code-block:: python

          import paddle
          import numpy as np
          data = paddle.full(shape=[3, 2], fill_value=2.5, dtype='float64') # [[2.5, 2.5], [2.5, 2.5], [2.5, 2.5]]
          array = np.array([[1, 1],
                            [3, 4],
                            [1, 3]]).astype(np.int64)
          result1 = paddle.zeros(shape=[3, 3], dtype='float32')
          paddle.assign(array, result1) # result1 = [[1, 1], [3, 4], [1, 3]]
          result2 = paddle.assign(data)  # result2 = [[2.5, 2.5], [2.5, 2.5], [2.5, 2.5]]
          result3 = paddle.assign(np.array([[2.5, 2.5], [2.5, 2.5], [2.5, 2.5]], dtype='float32')) # result3 = [[2.5, 2.5], [2.5, 2.5], [2.5, 2.5]]
    """
    helper = LayerHelper('assign', **locals())
    check_type(
        input,
        'input',
        (Variable, numpy.ndarray, list, tuple, float, int, bool),
        'assign',
    )
    is_inplace = output is not None

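    # Normalize Python scalars and lists/tuples to numpy arrays so the
    # numpy branch below can handle them uniformly.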
    if numpy.isscalar(input) and not isinstance(input, str):
        input = numpy.array([input])
    elif isinstance(input, (list, tuple)):
        input = numpy.array(input)
    # NOTE(Aurelius84): Why we judge core.VarBase?
    # In case of @to_static, a VarBase can be as input of `assign`,
    # but _non_static_mode()==False under @to_static, which means
    # isinstance(VarBase, Variable) == False. It will cause return None
    # after this api.
    if isinstance(input, (Variable, core.VarBase)):
        if _non_static_mode():
            if in_dygraph_mode() and output is None:
                output = _C_ops.assign(input)
            elif in_dygraph_mode() and output is not None:
                _C_ops.assign_out_(input, output)
            else:
                if output is None:
                    if _in_legacy_dygraph():
                        output = core.VarBase()
                    else:
                        output = core.eager.Tensor()
                _legacy_C_ops.assign(input, output)
        else:
            check_dtype(
                input.dtype,
                'input',
                [
                    'float16',
                    'uint16',
                    'float32',
                    'float64',
                    'int32',
                    'int64',
                    'uint8',
                    'bool',
                ],
                'assign',
                '(When the type of input in assign is Variable.)',
            )
            if output is None:
                output = helper.create_variable_for_type_inference(
                    dtype=input.dtype
                )
            helper.append_op(
                type='assign', inputs={'X': [input]}, outputs={'Out': [output]}
            )
    elif isinstance(input, numpy.ndarray):
        # Inputs like [var, var, ...] are not supported currently.
        if len(input.shape) > 0 and any(isinstance(x, Variable) for x in input):
            raise TypeError(
                "Required type(input) numpy.ndarray, but found `list(Variable)` in input."
            )
        dtype = convert_np_dtype_to_dtype_(input.dtype)
        if dtype == VarDesc.VarType.FP64:
            # Setting FP64 numpy data is not supported in Paddle, so we
            # use FP32 here
            warnings.warn(
                "paddle.assign doesn't support float64 input now due "
                "to current platform protobuf data limitation, we convert "
                "it to float32"
            )
            dtype = VarDesc.VarType.FP32
        if dtype == VarDesc.VarType.BOOL:
            value_name = "bool_values"
            values = [int(v) for v in input.flat]
        elif dtype == VarDesc.VarType.FP32:
            value_name = "fp32_values"
            values = [float(v) for v in input.flat]
        elif dtype == VarDesc.VarType.INT32:
            value_name = "int32_values"
            values = [int(v) for v in input.flat]
        elif dtype == VarDesc.VarType.INT64:
            value_name = "int64_values"
            values = [int(v) for v in input.flat]
        else:
            raise TypeError(
                "When the type of 'input' in assign is numpy.ndarray, "
                "the data type of 'input' must be bool, float32, int32 or int64, but "
                "received %s." % convert_dtype(dtype)
            )
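        # The numpy values are embedded into the op as attributes, so very
        # large arrays would bloat the program; hence the size cap below.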
        if input.size > 1024 * 1024:
            raise ValueError(
                "The size of input is too big. Please consider "
                "saving it to file and 'load_op' to load it"
            )
        if in_dygraph_mode():
            if output is None:
                output = zeros(list(input.shape), dtype)
            _C_ops.assign_value_(
                output,
                list(input.shape),
                dtype,
                values,
                _current_expected_place(),
            )
        elif _in_legacy_dygraph():
            if output is None:
                output = core.VarBase()
            _legacy_C_ops.assign_value(
                output,
                'shape',
                list(input.shape),
                'dtype',
                dtype,
                value_name,
                values,
            )
        else:
            if output is None:
                output = helper.create_variable_for_type_inference(
                    dtype=input.dtype
                )
            helper.append_op(
                type='assign_value',
                outputs={'Out': [output]},
                attrs={
                    'dtype': dtype,
                    'shape': list(input.shape),
                    value_name: values,
                },
            )

    if is_inplace and _non_static_mode():
        output._bump_inplace_version()

    return output


def fill_constant(shape, dtype, value, force_cpu=False, out=None, name=None):
    """

    This OP creates a Tensor with specified `shape` and `dtype`, and
    initializes it with a constant specified by `value`.

    The attribute `stop_gradient` of the created Tensor is set to True.

    Args:
        shape(list|tuple|Tensor): Shape of the output Tensor, the data type of ``shape`` is int32 or int64.
            If ``shape`` is a list or tuple, the elements of it should be integers or Tensors with shape [1].
            If ``shape`` is a Tensor, it should be a 1-D Tensor with data type int32 or int64.
        dtype(np.dtype|str): Data type of the output Tensor which can
            be float16, float32, float64, uint8, int16, int32, int64.
        value(bool|float|int|Tensor): The constant value used to initialize
            the Tensor to be created. If ``value`` is a Tensor, it should be a 1-D Tensor.
        force_cpu(bool, optional): data should be on CPU if it's true, default value is False.
        out(Tensor, optional): Optional output which can be any created
            Tensor that meets the requirements to store the result of operation.
            if ``out`` is None, a new Tensor will be created to store the result.
        name(str, optional): The default value is None.  Normally there is no need for user to set this
            property.  For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Tensor: Tensor which is created according to shape and dtype.

    Examples:
        .. code-block:: python

          import paddle.fluid as fluid
          # attr shape is a list which doesn't contain any Tensor.
          data1 = fluid.layers.fill_constant(shape=[2,1], value=0, dtype='int64') # data1=[[0],[0]]
          data2 = fluid.layers.fill_constant(shape=[2,1], value=5, dtype='int64', out=data1)
          # data1=[[5], [5]] data2=[[5], [5]]

          # attr shape is a list which contains Tensor.
          positive_2 = fluid.layers.fill_constant([1], "int32", 2)
          data3 = fluid.layers.fill_constant(shape=[1, positive_2], dtype='float32', value=1.5) # data3=[[1.5, 1.5]]

          # attr shape is a Tensor.
          shape = fluid.layers.fill_constant([2], "int32", 2) # shape=[2,2]
          data4 = fluid.layers.fill_constant(shape=shape, dtype='bool', value=True) # data4=[[True,True],[True,True]]

          # attr value is a Tensor.
          val = fluid.layers.fill_constant([1], "float32", 2.0) # val=[2.0]
          data5 = fluid.layers.fill_constant(shape=[2,1], value=val, dtype='float32') #data5=[[2.0],[2.0]]
    """

    attrs = {'force_cpu': force_cpu}
    dtype = convert_dtype(dtype)
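    # For non-Tensor values the constant is recorded both as a numeric
    # 'value' attribute and as a 'str_value' string; the string form keeps
    # integer constants exact (a float attribute could round them).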
    if not isinstance(value, Variable):
        if dtype in ['uint8', 'int16', 'int32', 'int64']:
            attrs['str_value'] = str(int(value))
            attrs['value'] = int(value)
        else:
            attrs['str_value'] = str(float(value))
            attrs['value'] = float(value)

    if in_dygraph_mode():
        place = _current_expected_place()
        if force_cpu:
            place = core.CPUPlace()
        if isinstance(shape, (list, tuple)):
            shape = utils.convert_shape_to_list(shape)

        if not isinstance(dtype, core.VarDesc.VarType):
            dtype = convert_np_dtype_to_dtype_(dtype)

        if out is None:
            out = _C_ops.full(shape, float(value), dtype, place)
            out.stop_gradient = True
            return out

        if out is not None:
            # Eager (final-state) mode supports filling an existing `out`
            # in place.
            _C_ops.full_(out, shape, float(value), dtype, place)
            out.stop_gradient = True
            return out

    if _in_legacy_dygraph():
        shape = utils.convert_shape_to_list(shape)
        if out is None:
            out = _varbase_creator(dtype=dtype)

        if isinstance(value, Variable):
            if dtype in ['uint8', 'int16', 'int32', 'int64']:
                attrs['str_value'] = str(int(value.numpy().item(0)))
            else:
                attrs['str_value'] = str(float(value.numpy().item(0)))

        _legacy_C_ops.fill_constant(
            out,
            'value',
            float(value),
            'force_cpu',
            force_cpu,
            'dtype',
            out.dtype,
            'str_value',
            attrs['str_value'],
            'shape',
            shape,
        )
        out.stop_gradient = True
        return out

    helper = LayerHelper("fill_constant", **locals())
    inputs = {}
    if isinstance(value, Variable):
        if convert_dtype(value.dtype) != dtype:
            value = cast(value, dtype)
        inputs['ValueTensor'] = value

    check_shape(shape)
    check_dtype(
        dtype,
        'dtype',
        [
            'bool',
            'float16',
            'float32',
            'float64',
            'uint8',
            'int16',
            'int32',
            'int64',
            'complex64',
            'complex128',
        ],
        'fill_constant',
    )
    check_type(shape, 'shape', (Variable, list, tuple), 'fill_constant')

    if out is not None:
        check_variable_and_dtype(
            out, 'out', [convert_dtype(dtype)], 'fill_constant'
        )

    helper = LayerHelper("fill_constant", **locals())
    utils.get_shape_tensor_inputs(
        inputs=inputs, attrs=attrs, shape=shape, op_type='fill_constant'
    )

    if out is None:
        out = helper.create_variable_for_type_inference(dtype=dtype)
    attrs['dtype'] = out.dtype
    helper.append_op(
        type='fill_constant',
        inputs=inputs,
        outputs={'Out': [out]},
        attrs=attrs,
        stop_gradient=True,
    )
    out.stop_gradient = True
    return out


@deprecated(since='1.8.0', update_to="paddle.fluid.layers.fill_constant")
@templatedoc()
def fill_constant_batch_size_like(
    input,
    shape,
    dtype,
    value,
    input_dim_idx=0,
    output_dim_idx=0,
    force_cpu=False,
):
    """
    This OP creates a Tensor according to the shape and dtype, and initializes the
    Tensor with the constant provided in ``value``. When the input is a LoDTensor
    and input_dim_idx is 0, the output_dim_idx dimension of the output is set to
    the batch size of the input. The stop_gradient attribute of the created
    Tensor is set to True.

    Args:
        input(Variable): Tensor which data type is float32, float64, int32 and int64.
        shape(list): The shape of Tensor to be created, Tensor's shape may be changed
            according to the input.
        dtype(np.dtype|core.VarDesc.VarType|str): The data type of created Tensor which
            can be float32, float64, int32, int64.
        value(float|int): The constant value used to initialize the Tensor to be created.
        input_dim_idx(int): When the value is 0 and the input is LoDTensor, the output_dim_idx
            dimension of the created Tensor is set to the batch_size value of input.
            The default value is 0.
        output_dim_idx(int): Used to specify which dimension of Tensor is created to be set
            the value of batch_size of input Tensor. The default value is 0.
        force_cpu(bool): data should be on CPU if it's true, default value is False.

    Returns:
        Variable: Tensor which will be created according to dtype.

    Examples:

        .. code-block:: python

             import paddle.fluid as fluid
             like = fluid.layers.fill_constant(shape=[1,2], value=10, dtype='int64') #like=[[10, 10]]
             data = fluid.layers.fill_constant_batch_size_like(
                    input=like, shape=[1], value=0, dtype='int64') #like=[[10, 10]] data=[0]

    """
    if in_dygraph_mode():
        if not isinstance(dtype, core.VarDesc.VarType):
            dtype = convert_np_dtype_to_dtype_(dtype)

        place = _current_expected_place()
        if force_cpu:
            place = core.CPUPlace()
        out = _C_ops.full_batch_size_like(
            input, shape, dtype, value, input_dim_idx, output_dim_idx, place
        )
        out.stop_gradient = True
        return out

    helper = LayerHelper("fill_constant_batch_size_like", **locals())
    out = helper.create_variable_for_type_inference(dtype=dtype)
    attrs = {
        'shape': shape,
        'dtype': out.dtype,
        'value': float(value),
        'input_dim_idx': input_dim_idx,
        'output_dim_idx': output_dim_idx,
        'force_cpu': force_cpu,
    }
    if convert_dtype(dtype) in ['int64', 'int32']:
        attrs['str_value'] = str(int(value))
    else:
        attrs['str_value'] = str(float(value))
    helper.append_op(
        type='fill_constant_batch_size_like',
        inputs={'Input': input},
        outputs={'Out': [out]},
        attrs=attrs,
    )
    out.stop_gradient = True
    return out


def argmin(x, axis=0):
    """
        :alias_main: paddle.argmin
        :alias: paddle.argmin,paddle.tensor.argmin,paddle.tensor.search.argmin
        :old_api: paddle.fluid.layers.argmin

    **argmin**

    This OP computes the indices of the min elements of the input tensor
    along the provided axis.

    Args:
        x(Variable): An input N-D Tensor with type float32, float64, int16,
            int32, int64, uint8.
        axis(int, optional): Axis to compute indices along. The effective range
            is [-R, R), where R is Rank(x). When axis < 0, it works the same way
            as axis + R. Default is 0.

    Returns:
        Variable: A Tensor with data type int64.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import numpy as np

            in1 = np.array([[[5,8,9,5],
                            [0,0,1,7],
                            [6,9,2,4]],
                            [[5,2,4,2],
                            [4,7,7,9],
                            [1,7,0,6]]])
            with fluid.dygraph.guard():
                x = fluid.dygraph.to_variable(in1)
                out1 = fluid.layers.argmin(x=x, axis=-1)
                out2 = fluid.layers.argmin(x=x, axis=0)
                out3 = fluid.layers.argmin(x=x, axis=1)
                out4 = fluid.layers.argmin(x=x, axis=2)
                print(out1.numpy())
                # [[0 0 2]
                #  [1 0 2]]
                print(out2.numpy())
                # [[0 1 1 1]
                #  [0 0 0 0]
                #  [1 1 1 0]]
                print(out3.numpy())
                # [[1 1 1 2]
                #  [2 0 2 0]]
                print(out4.numpy())
                # [[0 0 2]
                #  [1 0 2]]
    """
    check_variable_and_dtype(
        x,
        'x',
        ['float32', 'float64', 'uint8', 'int16', 'int32', 'int64'],
        'argmin',
    )
    helper = LayerHelper("arg_min", **locals())
    out = helper.create_variable_for_type_inference(VarDesc.VarType.INT64)
    helper.append_op(
        type='arg_min',
        inputs={'X': x},
        outputs={'Out': [out]},
        attrs={'axis': axis},
    )
    out.stop_gradient = True
    return out


def argmax(x, axis=0):
    """
    **argmax**

    This OP computes the indices of the max elements of the input tensor
    along the provided axis.

    Args:
        x(Variable): An input N-D Tensor with type float32, float64, int16,
            int32, int64, uint8.
        axis(int, optional): Axis to compute indices along. The effective range
            is [-R, R), where R is Rank(x). When axis < 0, it works the same way
            as axis + R. Default is 0.

    Returns:
        Variable: A Tensor with data type int64.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import numpy as np

            in1 = np.array([[[5,8,9,5],
                            [0,0,1,7],
                            [6,9,2,4]],
                            [[5,2,4,2],
                            [4,7,7,9],
                            [1,7,0,6]]])
            with fluid.dygraph.guard():
                x = fluid.dygraph.to_variable(in1)
                out1 = fluid.layers.argmax(x=x, axis=-1)
                out2 = fluid.layers.argmax(x=x, axis=0)
                out3 = fluid.layers.argmax(x=x, axis=1)
                out4 = fluid.layers.argmax(x=x, axis=2)
                print(out1.numpy())
                # [[2 3 1]
                #  [0 3 1]]
                print(out2.numpy())
                # [[0 0 0 0]
                #  [1 1 1 1]
                #  [0 0 0 1]]
                print(out3.numpy())
                # [[2 2 0 1]
                #  [0 1 1 1]]
                print(out4.numpy())
                # [[2 3 1]
                #  [0 3 1]]
    """
    check_variable_and_dtype(
        x,
        'x',
        ['float32', 'float64', 'uint8', 'int16', 'int32', 'int64'],
        'argmax',
    )
    helper = LayerHelper("arg_max", **locals())
    out = helper.create_variable_for_type_inference(VarDesc.VarType.INT64)
    helper.append_op(
        type='arg_max',
        inputs={'X': x},
        outputs={'Out': [out]},
        attrs={'axis': axis},
    )
    out.stop_gradient = True
    return out


def zeros(shape, dtype, force_cpu=False, name=None):
    """
    The OP creates a tensor of specified :attr:`shape` and :attr:`dtype`, and fills it with 0.
    Its :attr:`stop_gradient` will be set to True to stop gradient computation.

    Parameters:
        shape(tuple|list|Tensor): Shape of output Tensor, the data type of ``shape`` is int32 or int64.
        dtype (np.dtype|str): Data type of output Tensor, it supports
            bool, float16, float32, float64, int32 and int64.
        force_cpu (bool, optional): Whether force to store the output Tensor in CPU memory.
            If :attr:`force_cpu` is False, the output Tensor will be stored in running device memory.
            Default: False.
        name(str, optional): The default value is None.  Normally there is no need for user to set this
            property.  For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Tensor: A tensor of data type :attr:`dtype` with shape :attr:`shape` and all elements set to 0.

    Examples:
        .. code-block:: python

          import paddle.fluid as fluid
          data = fluid.layers.zeros(shape=[3, 2], dtype='float32') # [[0., 0.], [0., 0.], [0., 0.]]

          # shape is a Tensor
          shape = fluid.layers.fill_constant(shape=[2], dtype='int32', value=2)
          data1 = fluid.layers.zeros(shape=shape, dtype='int32') #[[0, 0], [0, 0]]
    """
    return fill_constant(value=0.0, **locals())