#   Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import numpy
import warnings

from ..layer_helper import LayerHelper
from ..framework import (
    _current_expected_place,
    convert_np_dtype_to_dtype_,
    _non_static_mode,
    _varbase_creator,
    _in_legacy_dygraph,
    in_dygraph_mode,
)
from ..framework import Variable
from ..core import VarDesc
from .. import core
from .layer_function_generator import templatedoc
from . import utils
from ..data_feeder import (
    check_variable_and_dtype,
    check_type,
    check_dtype,
    convert_dtype,
)
from paddle.utils import deprecated

from .utils import check_shape
from paddle import _C_ops, _legacy_C_ops

__all__ = [
    'cast',
    'tensor_array_to_tensor',
    'concat',
    'sums',
    'assign',
    'fill_constant_batch_size_like',
    'fill_constant',
    'argmin',
    'argmax',
    'zeros',
]


def cast(x, dtype):
    """

    This OP takes in the Tensor :attr:`x` with :attr:`x.dtype` and casts it
    to the output Tensor with :attr:`dtype`. Casting to the same dtype as the
    input is allowed, though it has no effect.

    Args:
        x(Tensor): An input N-D Tensor with data type bool, float16,
            float32, float64, int32, int64, uint8.
        dtype(np.dtype|str): Data type of the output:
            bool, float16, float32, float64, int8, int32, int64, uint8.

    Returns:
        Tensor: A Tensor of :attr:`dtype` with the same shape as the input.

    Examples:
        .. code-block:: python

            import paddle

            x = paddle.to_tensor([2, 3, 4], 'float64')
            y = paddle.cast(x, 'uint8')
    """
    if in_dygraph_mode():
        if not isinstance(dtype, core.VarDesc.VarType):
            dtype = convert_np_dtype_to_dtype_(dtype)
        return _C_ops.cast(x, dtype)

    if _non_static_mode():
        if not isinstance(dtype, core.VarDesc.VarType):
            dtype = convert_np_dtype_to_dtype_(dtype)
        out = _legacy_C_ops.cast(x, 'in_dtype', x.dtype, 'out_dtype', dtype)
        return out

    check_variable_and_dtype(
        x,
        'x',
        [
            'bool',
            'float16',
            'float32',
            'float64',
            'int16',
            'int32',
            'int64',
            'uint8',
            'uint16',
        ],
        'cast',
    )
    check_dtype(
        dtype,
        'dtype',
        [
            'bool',
            'float16',
            'float32',
            'float64',
            'int8',
            'int16',
            'int32',
            'int64',
            'uint8',
            'uint16',
        ],
        'cast',
    )

    helper = LayerHelper('cast', **locals())
    out = helper.create_variable_for_type_inference(
        dtype=dtype, stop_gradient=x.stop_gradient
    )
    helper.append_op(
        type='cast',
        inputs={'X': [x]},
        outputs={'Out': [out]},
        attrs={'in_dtype': x.dtype, 'out_dtype': out.dtype},
    )
    return out


def concat(input, axis=0, name=None):
    """
    This OP concatenates the input Tensors along the given axis.

    Args:
        input(list|tuple|Tensor): ``input`` can be Tensor, Tensor list or Tensor tuple which is with data type
            bool, float16, float32, float64, int32, int64. All the Tensors in ``input`` must have the same data type.
        axis(int|Tensor, optional): Specify the axis to operate on the input Tensors.
            It's a scalar with data type int or a Tensor with shape [1] and data type int32 or int64.
            The effective range is [-R, R), where R is Rank(x). When ``axis < 0``, it works the same way
            as ``axis+R``. Default is 0.
        name (str, optional): The default value is None. Normally there is no
            need for user to set this property. For more information, please
            refer to :ref:`api_guide_Name`.

    Returns:
        Tensor: A Tensor with the same data type as ``input``.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import numpy as np

            in1 = np.array([[1, 2, 3],
                            [4, 5, 6]])
            in2 = np.array([[11, 12, 13],
                            [14, 15, 16]])
            in3 = np.array([[21, 22],
                            [23, 24]])
            with fluid.dygraph.guard():
                x1 = fluid.dygraph.to_variable(in1)
                x2 = fluid.dygraph.to_variable(in2)
                x3 = fluid.dygraph.to_variable(in3)
                # When the axis is negative, the real axis is (axis + Rank(x)).
                # As follows, axis is -1, Rank(x) is 2, the real axis is 1
                out1 = fluid.layers.concat(input=[x1, x2, x3], axis=-1)
                out2 = fluid.layers.concat(input=[x1, x2], axis=0)
                print(out1.numpy())
                # [[ 1  2  3 11 12 13 21 22]
                #  [ 4  5  6 14 15 16 23 24]]
                print(out2.numpy())
                # [[ 1  2  3]
                #  [ 4  5  6]
                #  [11 12 13]
                #  [14 15 16]]
    """
    if in_dygraph_mode():
        if isinstance(axis, Variable):
            axis = axis.numpy()
            axis = axis.item(0)
        if not isinstance(input, Variable):
            input = [t for t in input if t.shape.count(0) == 0]
        out = _C_ops.concat(input, axis)
        return out

    if _in_legacy_dygraph():
        if isinstance(axis, Variable):
            axis = axis.numpy()
            axis = axis.item(0)
        if not isinstance(input, Variable):
            input = [t for t in input if t.shape.count(0) == 0]
        out = _varbase_creator()
        _legacy_C_ops.concat(input, out, 'axis', axis)
        return out

    check_type(input, 'input', (list, tuple, Variable), 'concat')
    if not isinstance(input, Variable):
        for id, x in enumerate(input):
            check_variable_and_dtype(
                x,
                'input[' + str(id) + ']',
                ['bool', 'float16', 'float32', 'float64', 'int32', 'int64'],
                'concat',
            )
            if x.dtype != input[0].dtype:
                raise TypeError(
                    "All the Tensors in the input must have the same data type."
                )
    else:
        input = [input]
    check_type(axis, 'axis', (int, Variable), 'concat')

    if isinstance(axis, Variable):
        check_dtype(
            axis.dtype,
            'axis',
            ['int32', 'int64'],
            'concat',
            "The data type of axis must be int32 or int64 when axis is a Tensor",
        )

    helper = LayerHelper('concat', **locals())
    out = helper.create_variable_for_type_inference(dtype=helper.input_dtype())

    if input[0].desc.type() == core.VarDesc.VarType.LOD_TENSOR_ARRAY:
        # NOTE(liym27): Don't remove this if branch!
        # This feature is supported for Dynamic-to-Static: after transformation,
        # the type of inputs[0] is LOD_TENSOR_ARRAY in some scenarios. This
        # feature can also be used in static mode.

        assert len(input) == 1, (
            "If the elements of 'input' in concat are Variable(LoDTensorArray), "
            "number of the elements must be 1, but received %s." % len(input)
        )
        out_index = helper.create_variable_for_type_inference(dtype="int32")
        helper.append_op(
            type='tensor_array_to_tensor',
            inputs={'X': input[0]},
            outputs={'Out': [out], 'OutIndex': [out_index]},
            attrs={'axis': axis, 'use_stack': False},
        )
    else:
        inputs = {'X': input}
        attrs = {}
        if isinstance(axis, Variable):
            axis.stop_gradient = True
        attrs['axis'] = axis

        helper.append_op(
            type='concat', inputs=inputs, outputs={'Out': [out]}, attrs=attrs
        )
    return out


def tensor_array_to_tensor(input, axis=1, name=None, use_stack=False):
    r"""
    This function concatenates or stacks all tensors in the input LoDTensorArray
    along the given axis and returns the result as the output.

    For Example:

    .. code-block:: text

        Case 1:

            Given:

                input.data = {[[0.6, 0.1, 0.3],
                               [0.5, 0.3, 0.2]],
                              [[1.3],
                               [1.8]],
                              [[2.3, 2.1],
                               [2.5, 2.4]]}

                axis = 1, use_stack = False

            Then:

                output.data = [[0.6, 0.1, 0.3, 1.3, 2.3, 2.1],
                               [0.5, 0.3, 0.2, 1.8, 2.5, 2.4]]

                output_index.data = [3, 1, 2]

        Case 2:

            Given:

                input.data = {[[0.6, 0.1],
                               [0.5, 0.3]],
                              [[0.3, 1.3],
                               [0.2, 1.8]],
                              [[2.3, 2.1],
                               [2.5, 2.4]]}

                axis = 1, use_stack = True

            Then:

                output.data = [[[0.6, 0.1]
                                [0.3, 1.3]
                                [2.3, 2.1],
                               [[0.5, 0.3]
                                [0.2, 1.8]
                                [2.5, 2.4]]]

                output_index.data = [2, 2, 2]

    Args:
        input(Variable): A LoDTensorArray variable.
        axis(int): The axis along which the tensors in :attr:`input` will be
            concatenated or stacked.
        name(str|None): A name for this layer(optional). If set None, the layer
                       will be named automatically.
        use_stack(bool): Act as concat_op or stack_op. For stack mode, all
            tensors in the tensor array must have the same shape.

    Returns:
        Variable: The concatenated or stacked tensor variable.
        Variable: A 1-D tensor variable with int32 data type. The data in this \
            tensor contains the sizes of all the input tensors along the axis.

    Examples:
        .. code-block:: python

            import paddle
            import paddle.fluid as fluid
            import numpy as np

            x0 = fluid.layers.assign(np.random.rand(2, 2).astype("float32"))
            x1 = fluid.layers.assign(np.random.rand(2, 2).astype("float32"))
            i = fluid.layers.fill_constant(shape=[1], dtype="int64", value=0)
            array = paddle.tensor.create_array(dtype='float32')
            paddle.tensor.array_write(x0, i, array)
            paddle.tensor.array_write(x1, i + 1, array)
            output, output_index = fluid.layers.tensor_array_to_tensor(input=array)
    """
    if _non_static_mode():
        assert isinstance(
            input, list
        ), "The 'input' in tensor_array_to_tensor must be list"
        from .nn import concat
        from ..dygraph import to_variable
        from paddle import stack

        op = stack if use_stack else concat
        res = op(input, axis=axis)
        sizes = to_variable(
            numpy.array(list(map(lambda x: int(x.shape[axis]), input)))
        )
        return res, sizes

    check_type(input, 'input', (list, Variable), 'tensor_array_to_tensor')
    if isinstance(input, list):
        for i, input_x in enumerate(input):
            check_type(
                input_x,
                'input[' + str(i) + ']',
                Variable,
                'tensor_array_to_tensor',
            )
    helper = LayerHelper('tensor_array_to_tensor', **locals())
    out = helper.create_variable_for_type_inference(dtype=helper.input_dtype())
    out_index = helper.create_variable_for_type_inference(dtype="int32")
    helper.append_op(
        type='tensor_array_to_tensor',
        inputs={'X': input},
        outputs={'Out': [out], 'OutIndex': [out_index]},
        attrs={'axis': axis, 'use_stack': use_stack},
    )
    return out, out_index


def sums(input, out=None):
    r"""
    This function computes the elementwise sum of multiple input Tensors.

    - Case 1, sum of 3 Tensors

    .. code-block:: text

        # Input Tensors
        x0.shape = [2, 3]
        x0.data = [[1., 2., 3.],
                   [4., 5., 6.]]
        x1.shape = [2, 3]
        x1.data = [[10., 20., 30.],
                   [40., 50., 60.]]
        x2.shape = [2, 3]
        x2.data = [[100., 200., 300.],
                   [400., 500., 600.]]

        # Output Tensor
        out.shape = [2, 3]
        out.data = [[111., 222., 333.],
                    [444., 555., 666.]]

    Args:
        input (list): A list of Variables which hold input Tensors with the same
            data type and shape. Optional data types are: float32, float64, int32, int64.
        out (Variable, optional): Output Tensor. It can be any existing Variable.
            The default value is None, then a new Variable will be created and returned.

    Returns:
        Variable: The sum of the inputs. Its shape and data type are the same as the input. \
            If :code:`out` is not None, the returned value is :code:`out` .

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid

            x0 = fluid.layers.fill_constant(shape=[16, 32], dtype='int64', value=1)
            x1 = fluid.layers.fill_constant(shape=[16, 32], dtype='int64', value=2)
            x2 = fluid.layers.fill_constant(shape=[16, 32], dtype='int64', value=3)
            x3 = fluid.layers.fill_constant(shape=[16, 32], dtype='int64', value=0)

            # Sum of multiple Tensors, the result is stored to a new Variable sum0 (sum0=x0+x1+x2, the value is [[6, ..., 6], ..., [6, ..., 6]])
            sum0 = fluid.layers.sums(input=[x0, x1, x2])

            # Sum of multiple Tensors, sum1 and x3 represent the same Variable (x3=x0+x1+x2, the value is [[6, ..., 6], ..., [6, ..., 6]])
            sum1 = fluid.layers.sums(input=[x0, x1, x2], out=x3)
    """
    check_type(input, 'input', (Variable, tuple, list), 'sums')
    if isinstance(input, list) or isinstance(input, tuple):
        for input_section in input:
            check_variable_and_dtype(
                input_section,
                "input",
                ['float16', 'float32', 'float64', 'int32', 'int64'],
                'sums',
            )
    else:
        check_variable_and_dtype(
            input,
            "input",
            ['float16', 'float32', 'float64', 'int32', 'int64'],
            'sums',
        )

    helper = LayerHelper('sum', **locals())
    if out is None:
        out = helper.create_variable_for_type_inference(
            dtype=helper.input_dtype()
        )
    else:
        check_variable_and_dtype(
            out, "out", ['float32', 'float64', 'int32', 'int64'], 'sums'
        )

    helper.append_op(
        type='sum',
        inputs={'X': input},
        outputs={'Out': out},
        attrs={'use_mkldnn': False},
    )
    return out


def assign(input, output=None):
    """

    This OP copies the :attr:`input` to the :attr:`output`.

    Parameters:
        input (Tensor|numpy.ndarray|list|tuple|scalar): A tensor, numpy ndarray, tuple/list of scalar,
            or scalar. Its data type supports float16, float32, float64, int32, int64, and bool.
            Note: the float64 data will be converted to float32 because of current platform protobuf
            data limitation.
        output (Tensor, optional): A tensor. If :attr:`output` is None, a new tensor will
            be created as :attr:`output`. Default: None.

    Returns:
        Tensor: A tensor with the same shape, data type and value as :attr:`input`.

    Examples:
        .. code-block:: python

          import paddle
          import numpy as np
          data = paddle.full(shape=[3, 2], fill_value=2.5, dtype='float64') # [[2.5, 2.5], [2.5, 2.5], [2.5, 2.5]]
          array = np.array([[1, 1],
                            [3, 4],
                            [1, 3]]).astype(np.int64)
          result1 = paddle.zeros(shape=[3, 3], dtype='float32')
          paddle.assign(array, result1) # result1 = [[1, 1], [3, 4], [1, 3]]
          result2 = paddle.assign(data)  # result2 = [[2.5, 2.5], [2.5, 2.5], [2.5, 2.5]]
          result3 = paddle.assign(np.array([[2.5, 2.5], [2.5, 2.5], [2.5, 2.5]], dtype='float32')) # result3 = [[2.5, 2.5], [2.5, 2.5], [2.5, 2.5]]
    """
    helper = LayerHelper('assign', **locals())
    check_type(
        input,
        'input',
        (Variable, numpy.ndarray, list, tuple, float, int, bool),
        'assign',
    )
    is_inplace = output is not None

    if numpy.isscalar(input) and not isinstance(input, str):
        input = numpy.array([input])
    elif isinstance(input, (list, tuple)):
        input = numpy.array(input)
    # NOTE(Aurelius84): Why do we check core.VarBase?
    # In case of @to_static, a VarBase can be an input of `assign`,
    # but _non_static_mode()==False under @to_static, which means
    # isinstance(VarBase, Variable) == False. Without this check, the
    # API would return None in that case.
    if isinstance(input, (Variable, core.VarBase)):
        if _non_static_mode():
            if in_dygraph_mode() and output is None:
                output = _C_ops.assign(input)
            elif in_dygraph_mode() and output is not None:
                _C_ops.assign_out_(input, output)
            else:
                if output is None:
                    if _in_legacy_dygraph():
                        output = core.VarBase()
                    else:
                        output = core.eager.Tensor()
                _legacy_C_ops.assign(input, output)
        else:
            check_dtype(
                input.dtype,
                'input',
                [
                    'float16',
                    'uint16',
                    'float32',
                    'float64',
                    'int32',
                    'int64',
                    'uint8',
                    'bool',
                ],
                'assign',
                '(When the type of input in assign is Variable.)',
            )
            if output is None:
                output = helper.create_variable_for_type_inference(
                    dtype=input.dtype
                )
            helper.append_op(
                type='assign', inputs={'X': [input]}, outputs={'Out': [output]}
            )
    elif isinstance(input, numpy.ndarray):
        # Not support [var, var, ...] currently.
        if len(input.shape) > 0 and any(isinstance(x, Variable) for x in input):
            raise TypeError(
                "Required type(input) numpy.ndarray, but found `list(Variable)` in input."
            )
        dtype = convert_np_dtype_to_dtype_(input.dtype)
        if dtype == VarDesc.VarType.FP64:
            # Setting FP64 numpy data is not supported in Paddle, so we
            # use FP32 here
            warnings.warn(
                "paddle.assign doesn't support float64 input now due "
                "to current platform protobuf data limitation, we convert "
                "it to float32"
            )
            dtype = VarDesc.VarType.FP32
        if dtype == VarDesc.VarType.BOOL:
            value_name = "bool_values"
            values = [int(v) for v in input.flat]
        elif dtype == VarDesc.VarType.FP32:
            value_name = "fp32_values"
            values = [float(v) for v in input.flat]
        elif dtype == VarDesc.VarType.INT32:
            value_name = "int32_values"
            values = [int(v) for v in input.flat]
        elif dtype == VarDesc.VarType.INT64:
            value_name = "int64_values"
            values = [int(v) for v in input.flat]
        else:
            raise TypeError(
                "When the type of 'input' in assign is numpy.ndarray, "
                "the data type of 'input' must be bool, float32, int32 or int64, but "
                "received %s." % convert_dtype(dtype)
            )
        if input.size > 1024 * 1024:
            raise ValueError(
                "The size of input is too big. Please consider "
                "saving it to file and 'load_op' to load it"
            )
        if in_dygraph_mode():
            if output is None:
                output = zeros(list(input.shape), dtype)
            _C_ops.assign_value_(
                output,
                list(input.shape),
                dtype,
                values,
                _current_expected_place(),
            )
        elif _in_legacy_dygraph():
            if output is None:
                output = core.VarBase()
            _legacy_C_ops.assign_value(
                output,
                'shape',
                list(input.shape),
                'dtype',
                dtype,
                value_name,
                values,
            )
        else:
            if output is None:
                output = helper.create_variable_for_type_inference(
                    dtype=input.dtype
                )
            helper.append_op(
                type='assign_value',
                outputs={'Out': [output]},
                attrs={
                    'dtype': dtype,
                    'shape': list(input.shape),
                    value_name: values,
                },
            )

    if is_inplace and _non_static_mode():
        output._bump_inplace_version()

    return output


def fill_constant(shape, dtype, value, force_cpu=False, out=None, name=None):
    """

    This OP creates a Tensor with specified `shape` and `dtype`, and
    initializes it with a constant specified by `value`.

    The attribute `stop_gradient` of the created Tensor is set to True.

    Args:
        shape(list|tuple|Tensor): Shape of the output Tensor, the data type of ``shape`` is int32 or int64.
            If ``shape`` is a list or tuple, the elements of it should be integers or Tensors with shape [1].
            If ``shape`` is a Tensor, it should be a 1-D Tensor with data type int32 or int64.
        dtype(np.dtype|str): Data type of the output Tensor which can
            be float16, float32, float64, uint8, int16, int32, int64.
        value(bool|float|int|Tensor): The constant value used to initialize
            the Tensor to be created. If ``value`` is a Tensor, it should be a 1-D Tensor.
        force_cpu(bool, optional): data should be on CPU if it's true, default value is False.
        out(Tensor, optional): Optional output which can be any created
            Tensor that meets the requirements to store the result of operation.
            if ``out`` is None, a new Tensor will be created to store the result.
        name(str, optional): The default value is None.  Normally there is no need for user to set this
            property.  For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Tensor: Tensor which is created according to shape and dtype.

    Examples:
        .. code-block:: python

          import paddle.fluid as fluid
          # attr shape is a list which doesn't contain Tensor.
          data1 = fluid.layers.fill_constant(shape=[2,1], value=0, dtype='int64') # data1=[[0],[0]]
          data2 = fluid.layers.fill_constant(shape=[2,1], value=5, dtype='int64', out=data1)
          # data1=[[5], [5]] data2=[[5], [5]]

          # attr shape is a list which contains Tensor.
          positive_2 = fluid.layers.fill_constant([1], "int32", 2)
          data3 = fluid.layers.fill_constant(shape=[1, positive_2], dtype='float32', value=1.5) # data3=[[1.5, 1.5]]

          # attr shape is a Tensor.
          shape = fluid.layers.fill_constant([2], "int32", 2) # shape=[2,2]
          data4 = fluid.layers.fill_constant(shape=shape, dtype='bool', value=True) # data4=[[True,True],[True,True]]

          # attr value is a Tensor.
          val = fluid.layers.fill_constant([1], "float32", 2.0) # val=[2.0]
          data5 = fluid.layers.fill_constant(shape=[2,1], value=val, dtype='float32') #data5=[[2.0],[2.0]]
    """
    attrs = {'force_cpu': force_cpu}
    dtype = convert_dtype(dtype)
    if not isinstance(value, Variable):
        if dtype in ['uint8', 'int16', 'int32', 'int64']:
            attrs['str_value'] = str(int(value))
            attrs['value'] = int(value)
        else:
            attrs['str_value'] = str(float(value))
            attrs['value'] = float(value)

    if in_dygraph_mode():
        place = _current_expected_place()
        if force_cpu:
            place = core.CPUPlace()
        if isinstance(shape, (list, tuple)):
            shape = utils.convert_shape_to_list(shape)

        if not isinstance(dtype, core.VarDesc.VarType):
            dtype = convert_np_dtype_to_dtype_(dtype)

        if out is None:
            out = _C_ops.full(shape, float(value), dtype, place)
            out.stop_gradient = True
            return out

        if out is not None:
            # The final state (eager) mode also supports a non-None `out`.
            _C_ops.full_(out, shape, float(value), dtype, place)
            out.stop_gradient = True
            return out

    if _in_legacy_dygraph():
        shape = utils.convert_shape_to_list(shape)
        if out is None:
            out = _varbase_creator(dtype=dtype)

        if isinstance(value, Variable):
            if dtype in ['uint8', 'int16', 'int32', 'int64']:
                attrs['str_value'] = str(int(value.numpy().item(0)))
            else:
                attrs['str_value'] = str(float(value.numpy().item(0)))

        _legacy_C_ops.fill_constant(
            out,
            'value',
            float(value),
            'force_cpu',
            force_cpu,
            'dtype',
            out.dtype,
            'str_value',
            attrs['str_value'],
            'shape',
            shape,
        )
        out.stop_gradient = True
        return out

    helper = LayerHelper("fill_constant", **locals())
    inputs = {}
    if isinstance(value, Variable):
        if convert_dtype(value.dtype) != dtype:
            value = cast(value, dtype)
        inputs['ValueTensor'] = value

    check_shape(shape)
    check_dtype(
        dtype,
        'dtype',
        [
            'bool',
            'float16',
            'float32',
            'float64',
            'uint8',
            'int16',
            'int32',
            'int64',
            'complex64',
            'complex128',
        ],
        'fill_constant',
    )
    check_type(shape, 'shape', (Variable, list, tuple), 'fill_constant')

    if out is not None:
        check_variable_and_dtype(
            out, 'out', [convert_dtype(dtype)], 'fill_constant'
        )

    helper = LayerHelper("fill_constant", **locals())
    utils.get_shape_tensor_inputs(
        inputs=inputs, attrs=attrs, shape=shape, op_type='fill_constant'
    )

    if out is None:
        out = helper.create_variable_for_type_inference(dtype=dtype)
    attrs['dtype'] = out.dtype
    helper.append_op(
        type='fill_constant',
        inputs=inputs,
        outputs={'Out': [out]},
        attrs=attrs,
        stop_gradient=True,
    )
    out.stop_gradient = True
    return out


@deprecated(since='1.8.0', update_to="paddle.fluid.layers.fill_constant")
@templatedoc()
def fill_constant_batch_size_like(
    input,
    shape,
    dtype,
    value,
    input_dim_idx=0,
    output_dim_idx=0,
    force_cpu=False,
):
    """
    This OP creates a Tensor according to the shape and dtype, and initializes
    the Tensor with the constant provided in ``value``. When the input is a
    LoDTensor and ``input_dim_idx`` is 0, the ``output_dim_idx`` dimension of
    the output is set to the batch size taken from the input. The
    ``stop_gradient`` attribute of the created Tensor is False by default.

    Args:
        input(Variable): Tensor which data type is float32, float64, int32 and int64.
        shape(list): The shape of Tensor to be created, Tensor's shape may be changed
            according to the input.
        dtype(np.dtype|core.VarDesc.VarType|str): The data type of created Tensor which
            can be float32, float64, int32, int64.
        value(float|int): The constant value used to initialize the Tensor to be created.
        input_dim_idx(int): When the value is 0 and the input is LoDTensor, the output_dim_idx
            dimension of the created Tensor is set to the batch_size value of input.
            The default value is 0.
        output_dim_idx(int): The dimension of the created Tensor that is set to the
            batch_size value of the input Tensor. The default value is 0.
        force_cpu(bool): data should be on CPU if it's true, default value is False.

    Returns:
        Variable: Tensor which will be created according to dtype.

    Examples:

        .. code-block:: python

             import paddle.fluid as fluid
             like = fluid.layers.fill_constant(shape=[1,2], value=10, dtype='int64') #like=[[10, 10]]
             data = fluid.layers.fill_constant_batch_size_like(
                    input=like, shape=[1], value=0, dtype='int64') #like=[[10, 10]] data=[0]
    """
    if in_dygraph_mode():
        if not isinstance(dtype, core.VarDesc.VarType):
            dtype = convert_np_dtype_to_dtype_(dtype)

        place = _current_expected_place()
        if force_cpu:
            place = core.CPUPlace()
        out = _C_ops.full_batch_size_like(
            input, shape, dtype, value, input_dim_idx, output_dim_idx, place
        )
        out.stop_gradient = True
        return out

    helper = LayerHelper("fill_constant_batch_size_like", **locals())
    out = helper.create_variable_for_type_inference(dtype=dtype)
    attrs = {
        'shape': shape,
        'dtype': out.dtype,
        'value': float(value),
        'input_dim_idx': input_dim_idx,
        'output_dim_idx': output_dim_idx,
        'force_cpu': force_cpu,
    }
    if convert_dtype(dtype) in ['int64', 'int32']:
        attrs['str_value'] = str(int(value))
    else:
        attrs['str_value'] = str(float(value))
    helper.append_op(
        type='fill_constant_batch_size_like',
        inputs={'Input': input},
        outputs={'Out': [out]},
        attrs=attrs,
    )
    out.stop_gradient = True
    return out


def argmin(x, axis=0):
    """
        :alias_main: paddle.argmin
        :alias: paddle.argmin,paddle.tensor.argmin,paddle.tensor.search.argmin
        :old_api: paddle.fluid.layers.argmin

    **argmin**

    This OP computes the indices of the minimum elements of the input tensor
    along the provided axis.

    Args:
        x(Variable): An input N-D Tensor with type float32, float64, int16,
            int32, int64, uint8.
        axis(int, optional): Axis to compute indices along. The effective range
            is [-R, R), where R is Rank(x). When axis<0, it works the same way
            as axis+R. Default is 0.

    Returns:
        Variable: A Tensor with data type int64.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import numpy as np

            in1 = np.array([[[5,8,9,5],
                            [0,0,1,7],
                            [6,9,2,4]],
                            [[5,2,4,2],
                            [4,7,7,9],
                            [1,7,0,6]]])
            with fluid.dygraph.guard():
                x = fluid.dygraph.to_variable(in1)
                out1 = fluid.layers.argmin(x=x, axis=-1)
                out2 = fluid.layers.argmin(x=x, axis=0)
                out3 = fluid.layers.argmin(x=x, axis=1)
                out4 = fluid.layers.argmin(x=x, axis=2)
                print(out1.numpy())
                # [[0 0 2]
                #  [1 0 2]]
                print(out2.numpy())
                # [[0 1 1 1]
                #  [0 0 0 0]
                #  [1 1 1 0]]
                print(out3.numpy())
                # [[1 1 1 2]
                #  [2 0 2 0]]
                print(out4.numpy())
                # [[0 0 2]
                #  [1 0 2]]
    """
    check_variable_and_dtype(
        x,
        'x',
        ['float32', 'float64', 'uint8', 'int16', 'int32', 'int64'],
        'argmin',
    )
    helper = LayerHelper("arg_min", **locals())
    out = helper.create_variable_for_type_inference(VarDesc.VarType.INT64)
    helper.append_op(
        type='arg_min',
        inputs={'X': x},
        outputs={'Out': [out]},
        attrs={'axis': axis},
    )
    out.stop_gradient = True
    return out


def argmax(x, axis=0):
    """
    **argmax**

    This OP computes the indices of the maximum elements of the input tensor
    along the provided axis.

    Args:
        x(Variable): An input N-D Tensor with type float32, float64, int16,
            int32, int64, uint8.
        axis(int, optional): Axis to compute indices along. The effective range
            is [-R, R), where R is Rank(x). When axis<0, it works the same way
            as axis+R. Default is 0.

    Returns:
        Variable: A Tensor with data type int64.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import numpy as np

            in1 = np.array([[[5,8,9,5],
                            [0,0,1,7],
                            [6,9,2,4]],
                            [[5,2,4,2],
                            [4,7,7,9],
                            [1,7,0,6]]])
            with fluid.dygraph.guard():
                x = fluid.dygraph.to_variable(in1)
                out1 = fluid.layers.argmax(x=x, axis=-1)
                out2 = fluid.layers.argmax(x=x, axis=0)
                out3 = fluid.layers.argmax(x=x, axis=1)
                out4 = fluid.layers.argmax(x=x, axis=2)
                print(out1.numpy())
                # [[2 3 1]
                #  [0 3 1]]
                print(out2.numpy())
                # [[0 0 0 0]
                #  [1 1 1 1]
                #  [0 0 0 1]]
                print(out3.numpy())
                # [[2 2 0 1]
                #  [0 1 1 1]]
                print(out4.numpy())
                # [[2 3 1]
                #  [0 3 1]]
    """
    check_variable_and_dtype(
        x,
        'x',
        ['float32', 'float64', 'uint8', 'int16', 'int32', 'int64'],
        'argmax',
    )
    helper = LayerHelper("arg_max", **locals())
    out = helper.create_variable_for_type_inference(VarDesc.VarType.INT64)
    helper.append_op(
        type='arg_max',
        inputs={'X': x},
        outputs={'Out': [out]},
        attrs={'axis': axis},
    )
    out.stop_gradient = True
    return out


def zeros(shape, dtype, force_cpu=False, name=None):
    """
    The OP creates a tensor of specified :attr:`shape` and :attr:`dtype`, and fills it with 0.
    Its :attr:`stop_gradient` will be set to True to stop gradient computation.

    Parameters:
        shape(tuple|list|Tensor): Shape of output Tensor, the data type of ``shape`` is int32 or int64.
        dtype (np.dtype|str): Data type of output Tensor, it supports
            bool, float16, float32, float64, int32 and int64.
        force_cpu (bool, optional): Whether force to store the output Tensor in CPU memory.
            If :attr:`force_cpu` is False, the output Tensor will be stored in running device memory.
            Default: False.
        name(str, optional): The default value is None.  Normally there is no need for user to set this
            property.  For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Tensor: A tensor of data type :attr:`dtype` with shape :attr:`shape` and all elements set to 0.

    Examples:
        .. code-block:: python

          import paddle.fluid as fluid
          data = fluid.layers.zeros(shape=[3, 2], dtype='float32') # [[0., 0.], [0., 0.], [0., 0.]]

          # shape is a Tensor
          shape = fluid.layers.fill_constant(shape=[2], dtype='int32', value=2)
          data1 = fluid.layers.zeros(shape=shape, dtype='int32') #[[0, 0], [0, 0]]
    """
    return fill_constant(value=0.0, **locals())