#   Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import numpy
import warnings

from ..layer_helper import LayerHelper
from ..framework import (
    _current_expected_place,
    convert_np_dtype_to_dtype_,
    _non_static_mode,
    _varbase_creator,
    _in_legacy_dygraph,
    in_dygraph_mode,
)
from ..framework import Variable
from ..core import VarDesc
from .. import core
from .layer_function_generator import templatedoc
from . import utils
from ..data_feeder import (
    check_variable_and_dtype,
    check_type,
    check_dtype,
    convert_dtype,
)
from paddle.utils import deprecated

from .utils import check_shape
from paddle import _C_ops, _legacy_C_ops

__all__ = [
    'cast',
    'concat',
    'sums',
    'assign',
    'fill_constant_batch_size_like',
    'fill_constant',
    'argmin',
    'argmax',
    'zeros',
]


def cast(x, dtype):
    """

    This OP takes in the Tensor :attr:`x` with :attr:`x.dtype` and casts it
    to the output with :attr:`dtype`. Casting to the same dtype as the input
    is a no-op, but it is allowed.

    Args:
        x(Tensor): An input N-D Tensor with data type bool, float16,
            float32, float64, int32, int64, uint8.
        dtype(np.dtype|str): Data type of the output:
            bool, float16, float32, float64, int8, int32, int64, uint8.

    Returns:
        Tensor: A Tensor with the same shape as the input.

    Examples:
        .. code-block:: python

            import paddle

            x = paddle.to_tensor([2, 3, 4], 'float64')
            y = paddle.cast(x, 'uint8')
    """
    if in_dygraph_mode():
        if not isinstance(dtype, core.VarDesc.VarType):
            dtype = convert_np_dtype_to_dtype_(dtype)
        return _C_ops.cast(x, dtype)

    if _non_static_mode():
        if not isinstance(dtype, core.VarDesc.VarType):
            dtype = convert_np_dtype_to_dtype_(dtype)
        out = _legacy_C_ops.cast(x, 'in_dtype', x.dtype, 'out_dtype', dtype)
        return out

    check_variable_and_dtype(
        x,
        'x',
        [
            'bool',
            'float16',
            'float32',
            'float64',
            'int16',
            'int32',
            'int64',
            'uint8',
            'uint16',
        ],
        'cast',
    )
    check_dtype(
        dtype,
        'dtype',
        [
            'bool',
            'float16',
            'float32',
            'float64',
            'int8',
            'int16',
            'int32',
            'int64',
            'uint8',
            'uint16',
        ],
        'cast',
    )

    helper = LayerHelper('cast', **locals())
    out = helper.create_variable_for_type_inference(
        dtype=dtype, stop_gradient=x.stop_gradient
    )
    helper.append_op(
        type='cast',
        inputs={'X': [x]},
        outputs={'Out': [out]},
        attrs={'in_dtype': x.dtype, 'out_dtype': out.dtype},
    )
    return out


def concat(input, axis=0, name=None):
    """
    This OP concatenates the input Tensors along the given axis.

    Args:
        input(list|tuple|Tensor): ``input`` can be a Tensor, or a list or tuple of Tensors with data type
            bool, float16, float32, float64, int32, int64. All the Tensors in ``input`` must have the same data type.
        axis(int|Tensor, optional): Specify the axis to operate on the input Tensors.
            It's a scalar with data type int or a Tensor with shape [1] and data type int32 or int64.
            The effective range is [-R, R), where R is Rank(x). When ``axis < 0``, it works the same way
            as ``axis+R``. Default is 0.
        name (str, optional): The default value is None. Normally there is no
            need for user to set this property. For more information, please
            refer to :ref:`api_guide_Name`.

    Returns:
        Tensor: A Tensor with the same data type as ``input``.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import numpy as np

            in1 = np.array([[1, 2, 3],
                            [4, 5, 6]])
            in2 = np.array([[11, 12, 13],
                            [14, 15, 16]])
            in3 = np.array([[21, 22],
                            [23, 24]])
            with fluid.dygraph.guard():
                x1 = fluid.dygraph.to_variable(in1)
                x2 = fluid.dygraph.to_variable(in2)
                x3 = fluid.dygraph.to_variable(in3)
                # When the axis is negative, the real axis is (axis + Rank(x)).
                # As follows, axis is -1, Rank(x) is 2, the real axis is 1
                out1 = fluid.layers.concat(input=[x1, x2, x3], axis=-1)
                out2 = fluid.layers.concat(input=[x1, x2], axis=0)
                print(out1.numpy())
                # [[ 1  2  3 11 12 13 21 22]
                #  [ 4  5  6 14 15 16 23 24]]
                print(out2.numpy())
                # [[ 1  2  3]
                #  [ 4  5  6]
                #  [11 12 13]
                #  [14 15 16]]
    """

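    # Note: both dygraph paths below first drop entries whose shape contains
    # a zero-sized dimension, since an empty tensor contributes nothing to
    # the concatenation result.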
    if in_dygraph_mode():
        if isinstance(axis, Variable):
            axis = axis.numpy()
            axis = axis.item(0)
        if not isinstance(input, Variable):
            input = [t for t in input if t.shape.count(0) == 0]
        out = _C_ops.concat(input, axis)
        return out

    if _in_legacy_dygraph():
        if isinstance(axis, Variable):
            axis = axis.numpy()
            axis = axis.item(0)
        if not isinstance(input, Variable):
            input = [t for t in input if t.shape.count(0) == 0]
        out = _varbase_creator()
        _legacy_C_ops.concat(input, out, 'axis', axis)
        return out

    check_type(input, 'input', (list, tuple, Variable), 'concat')
    if not isinstance(input, Variable):
        for id, x in enumerate(input):
            check_variable_and_dtype(
                x,
                'input[' + str(id) + ']',
                ['bool', 'float16', 'float32', 'float64', 'int32', 'int64'],
                'concat',
            )
            if x.dtype != input[0].dtype:
                raise TypeError(
                    "All the Tensors in the input must have the same data type."
                )
    else:
        input = [input]
    check_type(axis, 'axis', (int, Variable), 'concat')

    if isinstance(axis, Variable):
        check_dtype(
            axis.dtype,
            'axis',
            ['int32', 'int64'],
            'concat',
            "The data type of axis must be int32 or int64 when axis is a Tensor",
        )

    helper = LayerHelper('concat', **locals())
    out = helper.create_variable_for_type_inference(dtype=helper.input_dtype())

    if input[0].desc.type() == core.VarDesc.VarType.LOD_TENSOR_ARRAY:
        # NOTE(liym27): Don't remove this if branch!
        # This feature is supported for Dynamic-to-Static, because after transformed, the type of inputs[0]
        # is LOD_TENSOR_ARRAY in some scenarios. And this feature can be used in static mode.

        assert len(input) == 1, (
            "If the elements of 'input' in concat are Variable(LoDTensorArray), "
            "number of the elements must be 1, but received %s." % len(input)
        )
        out_index = helper.create_variable_for_type_inference(dtype="int32")
        helper.append_op(
            type='tensor_array_to_tensor',
            inputs={'X': input[0]},
            outputs={'Out': [out], 'OutIndex': [out_index]},
            attrs={'axis': axis, 'use_stack': False},
        )
    else:
        inputs = {'X': input}
        attrs = {}
        if isinstance(axis, Variable):
            axis.stop_gradient = True
        attrs['axis'] = axis

        helper.append_op(
            type='concat', inputs=inputs, outputs={'Out': [out]}, attrs=attrs
        )
    return out


def sums(input, out=None):
    r"""
    This function computes the elementwise sum of multiple input Tensors.

    - Case 1, sum of 3 Tensors

    .. code-block:: text

        # Input Tensors
        x0.shape = [2, 3]
        x0.data = [[1., 2., 3.],
                   [4., 5., 6.]]
        x1.shape = [2, 3]
        x1.data = [[10., 20., 30.],
                   [40., 50., 60.]]
        x2.shape = [2, 3]
        x2.data = [[100., 200., 300.],
                   [400., 500., 600.]]

        # Output Tensor
        out.shape = [2, 3]
        out.data = [[111., 222., 333.],
                    [444., 555., 666.]]

    Args:
        input (list): A list of Variables which hold input Tensors with the same
            data type and shape. Optional data types are: float32, float64, int32, int64.
        out (Variable, optional): Output Tensor. It can be any existing Variable.
            The default value is None, then a new Variable will be created and returned.

    Returns:
        Variable: The sum of inputs. The shape and data type is the same as the input. \
            If :code:`out` is not None, the returned value is :code:`out` .

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid

            x0 = fluid.layers.fill_constant(shape=[16, 32], dtype='int64', value=1)
            x1 = fluid.layers.fill_constant(shape=[16, 32], dtype='int64', value=2)
            x2 = fluid.layers.fill_constant(shape=[16, 32], dtype='int64', value=3)
            x3 = fluid.layers.fill_constant(shape=[16, 32], dtype='int64', value=0)

            # Sum of multiple Tensors, the result is stored to a new Variable sum0 (sum0=x0+x1+x2, the value is [[6, ..., 6], ..., [6, ..., 6]])
            sum0 = fluid.layers.sums(input=[x0, x1, x2])

            # Sum of multiple Tensors, sum1 and x3 represent the same Variable (x3=x0+x1+x2, the value is [[6, ..., 6], ..., [6, ..., 6]])
            sum1 = fluid.layers.sums(input=[x0, x1, x2], out=x3)
    """
    check_type(input, 'input', (Variable, tuple, list), 'sums')
    if isinstance(input, list) or isinstance(input, tuple):
        for input_section in input:
            check_variable_and_dtype(
                input_section,
                "input",
                ['float16', 'float32', 'float64', 'int32', 'int64'],
                'sums',
            )
    else:
        check_variable_and_dtype(
            input,
            "input",
            ['float16', 'float32', 'float64', 'int32', 'int64'],
            'sums',
        )

    helper = LayerHelper('sum', **locals())
    if out is None:
        out = helper.create_variable_for_type_inference(
            dtype=helper.input_dtype()
        )
    else:
        check_variable_and_dtype(
            out, "out", ['float32', 'float64', 'int32', 'int64'], 'sums'
        )

    helper.append_op(
        type='sum',
        inputs={'X': input},
        outputs={'Out': out},
        attrs={'use_mkldnn': False},
    )
    return out


def assign(input, output=None):
    """

    The OP copies the :attr:`input` to the :attr:`output`.

    Parameters:
        input (Tensor|numpy.ndarray|list|tuple|scalar): A tensor, numpy ndarray, tuple/list of scalars,
            or a scalar. Its data type supports float16, float32, float64, int32, int64, and bool.
            Note: float64 data will be converted to float32 because of the current platform protobuf
            data limitation.
        output (Tensor, optional): A tensor. If :attr:`output` is None, a new tensor will
            be created as :attr:`output`. Default: None.

    Returns:
        Tensor: A tensor with the same shape, data type and value as :attr:`input`.

    Examples:
        .. code-block:: python

          import paddle
          import numpy as np
          data = paddle.full(shape=[3, 2], fill_value=2.5, dtype='float64') # [[2.5, 2.5], [2.5, 2.5], [2.5, 2.5]]
          array = np.array([[1, 1],
                            [3, 4],
                            [1, 3]]).astype(np.int64)
          result1 = paddle.zeros(shape=[3, 3], dtype='float32')
          paddle.assign(array, result1) # result1 = [[1, 1], [3, 4], [1, 3]]
          result2 = paddle.assign(data)  # result2 = [[2.5, 2.5], [2.5, 2.5], [2.5, 2.5]]
          result3 = paddle.assign(np.array([[2.5, 2.5], [2.5, 2.5], [2.5, 2.5]], dtype='float32')) # result3 = [[2.5, 2.5], [2.5, 2.5], [2.5, 2.5]]
    """
    helper = LayerHelper('assign', **locals())
    check_type(
        input,
        'input',
        (Variable, numpy.ndarray, list, tuple, float, int, bool),
        'assign',
    )
    is_inplace = True if output is not None else False

    if numpy.isscalar(input) and not isinstance(input, str):
        input = numpy.array([input])
    elif isinstance(input, (list, tuple)):
        input = numpy.array(input)
    # NOTE(Aurelius84): Why we judge core.VarBase?
    # In case of @to_static, a VarBase can be as input of `assign`,
    # but _non_static_mode()==False under @to_static, which means
    # isinstance(VarBase, Variable) == False. It will cause return None
    # after this api.
    if isinstance(input, (Variable, core.VarBase)):
        if _non_static_mode():
            if in_dygraph_mode() and output is None:
                output = _C_ops.assign(input)
            elif in_dygraph_mode() and output is not None:
                _C_ops.assign_out_(input, output)
            else:
                if output is None:
                    if _in_legacy_dygraph():
                        output = core.VarBase()
                    else:
                        output = core.eager.Tensor()
                _legacy_C_ops.assign(input, output)
        else:
            check_dtype(
                input.dtype,
                'input',
                [
                    'float16',
                    'uint16',
                    'float32',
                    'float64',
                    'int32',
                    'int64',
                    'uint8',
                    'bool',
                ],
                'assign',
                '(When the type of input in assign is Variable.)',
            )
            if output is None:
                output = helper.create_variable_for_type_inference(
                    dtype=input.dtype
                )
            helper.append_op(
                type='assign', inputs={'X': [input]}, outputs={'Out': [output]}
            )
    elif isinstance(input, numpy.ndarray):
        # Not support [var, var, ...] currently.
        if len(input.shape) > 0 and any(isinstance(x, Variable) for x in input):
            raise TypeError(
                "Required type(input) numpy.ndarray, but found `list(Variable)` in input."
            )
        dtype = convert_np_dtype_to_dtype_(input.dtype)
        if dtype == VarDesc.VarType.FP64:
            # Setting FP64 numpy data is not supported in Paddle, so we
            # use FP32 here
            warnings.warn(
                "paddle.assign doesn't support float64 input now due "
                "to current platform protobuf data limitation, we convert "
                "it to float32"
            )
            dtype = VarDesc.VarType.FP32
        if dtype == VarDesc.VarType.BOOL:
            value_name = "bool_values"
            values = [int(v) for v in input.flat]
        elif dtype == VarDesc.VarType.FP32:
            value_name = "fp32_values"
            values = [float(v) for v in input.flat]
        elif dtype == VarDesc.VarType.INT32:
            value_name = "int32_values"
            values = [int(v) for v in input.flat]
        elif dtype == VarDesc.VarType.INT64:
            value_name = "int64_values"
            values = [int(v) for v in input.flat]
        else:
            raise TypeError(
                "When the type of 'input' in assign is numpy.ndarray, "
                "the data type of 'input' must be bool, float32, int32 or int64, but "
                "received %s." % convert_dtype(dtype)
            )
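        # The values are inlined into the op's attributes; the 1M-element cap
        # below guards against bloating the serialized program with huge
        # constants.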
        if input.size > 1024 * 1024:
            raise ValueError(
                "The size of input is too big. Please consider "
                "saving it to file and 'load_op' to load it"
            )
        if in_dygraph_mode():
            if output is None:
                output = zeros(list(input.shape), dtype)
            _C_ops.assign_value_(
                output,
                list(input.shape),
                dtype,
                values,
                _current_expected_place(),
            )
        elif _in_legacy_dygraph():
            if output is None:
                output = core.VarBase()
            _legacy_C_ops.assign_value(
                output,
                'shape',
                list(input.shape),
                'dtype',
                dtype,
                value_name,
                values,
            )
        else:
            if output is None:
                output = helper.create_variable_for_type_inference(
                    dtype=input.dtype
                )
            helper.append_op(
                type='assign_value',
                outputs={'Out': [output]},
                attrs={
                    'dtype': dtype,
                    'shape': list(input.shape),
                    value_name: values,
                },
            )

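    # If the caller supplied `output`, the assignment happened in place; bump
    # the inplace version so dygraph's inplace-safety checks account for it.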
    if is_inplace and _non_static_mode():
        output._bump_inplace_version()

    return output


def fill_constant(shape, dtype, value, force_cpu=False, out=None, name=None):
    """

    This OP creates a Tensor with specified `shape` and `dtype`, and
    initializes it with a constant specified by `value`.

    The attribute `stop_gradient` of the created Tensor is set to True.

    Args:
        shape(list|tuple|Tensor): Shape of the output Tensor, the data type of ``shape`` is int32 or int64.
            If ``shape`` is a list or tuple, the elements of it should be integers or Tensors with shape [1].
            If ``shape`` is a Tensor, it should be a 1-D Tensor with data type int32 or int64.
        dtype(np.dtype|str): Data type of the output Tensor which can
            be float16, float32, float64, uint8, int16, int32, int64.
        value(bool|float|int|Tensor): The constant value used to initialize
            the Tensor to be created. If ``value`` is a Tensor, it should be a 1-D Tensor.
        force_cpu(bool, optional): data should be on CPU if it's true, default value is False.
        out(Tensor, optional): Optional output which can be any created
            Tensor that meets the requirements to store the result of operation.
            If ``out`` is None, a new Tensor will be created to store the result.
        name(str, optional): The default value is None.  Normally there is no need for user to set this
            property.  For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Tensor: Tensor which is created according to shape and dtype.

    Examples:
        .. code-block:: python

          import paddle.fluid as fluid
          # attr shape is a list which doesn't contain Tensor.
          data1 = fluid.layers.fill_constant(shape=[2,1], value=0, dtype='int64') # data1=[[0],[0]]
          data2 = fluid.layers.fill_constant(shape=[2,1], value=5, dtype='int64', out=data1)
          # data1=[[5], [5]] data2=[[5], [5]]

          # attr shape is a list which contains Tensor.
          positive_2 = fluid.layers.fill_constant([1], "int32", 2)
          data3 = fluid.layers.fill_constant(shape=[1, positive_2], dtype='float32', value=1.5) # data3=[[1.5, 1.5]]

          # attr shape is a Tensor.
          shape = fluid.layers.fill_constant([2], "int32", 2) # shape=[2,2]
          data4 = fluid.layers.fill_constant(shape=shape, dtype='bool', value=True) # data4=[[True,True],[True,True]]

          # attr value is a Tensor.
          val = fluid.layers.fill_constant([1], "float32", 2.0) # val=[2.0]
          data5 = fluid.layers.fill_constant(shape=[2,1], value=val, dtype='float32') # data5=[[2.0],[2.0]]
    """

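    # `value` is recorded both as a typed number ('value') and as a string
    # ('str_value'); the string form lets the op recover values that would
    # lose precision as a float attribute.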
    attrs = {'force_cpu': force_cpu}
    dtype = convert_dtype(dtype)
    if not isinstance(value, Variable):
        if dtype in ['uint8', 'int16', 'int32', 'int64']:
            attrs['str_value'] = str(int(value))
            attrs['value'] = int(value)
        else:
            attrs['str_value'] = str(float(value))
            attrs['value'] = float(value)

    if in_dygraph_mode():
        place = _current_expected_place()
        if force_cpu:
            place = core.CPUPlace()
        if isinstance(shape, (list, tuple)):
            shape = utils.convert_shape_to_list(shape)

        if not isinstance(dtype, core.VarDesc.VarType):
            dtype = convert_np_dtype_to_dtype_(dtype)

        if out is None:
            out = _C_ops.full(shape, float(value), dtype, place)
            out.stop_gradient = True
            return out

        if out is not None:
            # The final state mode supports a non-None `out`: fill it in place.
            _C_ops.full_(out, shape, float(value), dtype, place)
            out.stop_gradient = True
            return out

    if _in_legacy_dygraph():
        shape = utils.convert_shape_to_list(shape)
        if out is None:
            out = _varbase_creator(dtype=dtype)

        if isinstance(value, Variable):
            if dtype in ['uint8', 'int16', 'int32', 'int64']:
                attrs['str_value'] = str(int(value.numpy().item(0)))
            else:
                attrs['str_value'] = str(float(value.numpy().item(0)))

        _legacy_C_ops.fill_constant(
            out,
            'value',
            float(value),
            'force_cpu',
            force_cpu,
            'dtype',
            out.dtype,
            'str_value',
            attrs['str_value'],
            'shape',
            shape,
        )
        out.stop_gradient = True
        return out

    helper = LayerHelper("fill_constant", **locals())
    inputs = {}
    if isinstance(value, Variable):
        if convert_dtype(value.dtype) != dtype:
            value = cast(value, dtype)
        inputs['ValueTensor'] = value

    check_shape(shape)
    check_dtype(
        dtype,
        'dtype',
        [
            'bool',
            'float16',
            'float32',
            'float64',
            'uint8',
            'int16',
            'int32',
            'int64',
            'complex64',
            'complex128',
        ],
        'fill_constant',
    )
    check_type(shape, 'shape', (Variable, list, tuple), 'fill_constant')

    if out is not None:
        check_variable_and_dtype(
            out, 'out', [convert_dtype(dtype)], 'fill_constant'
        )

    helper = LayerHelper("fill_constant", **locals())
    utils.get_shape_tensor_inputs(
        inputs=inputs, attrs=attrs, shape=shape, op_type='fill_constant'
    )

    if out is None:
        out = helper.create_variable_for_type_inference(dtype=dtype)
    attrs['dtype'] = out.dtype
    helper.append_op(
        type='fill_constant',
        inputs=inputs,
        outputs={'Out': [out]},
        attrs=attrs,
        stop_gradient=True,
    )
    out.stop_gradient = True
    return out


@deprecated(since='1.8.0', update_to="paddle.fluid.layers.fill_constant")
@templatedoc()
def fill_constant_batch_size_like(
    input,
    shape,
    dtype,
    value,
    input_dim_idx=0,
    output_dim_idx=0,
    force_cpu=False,
):
    """
    This OP creates a Tensor according to the shape and dtype, and initializes the
    Tensor with the constants provided in ``value``. When the input is a LoDTensor
    and ``input_dim_idx`` is 0, the ``output_dim_idx`` dimension of the output is
    set to the batch size of the input. The stop_gradient attribute of the created
    Tensor is False by default.

    Args:
        input(Variable): Tensor whose data type is float32, float64, int32 or int64.
        shape(list): The shape of the Tensor to be created; the Tensor's shape may be changed
            according to the input.
        dtype(np.dtype|core.VarDesc.VarType|str): The data type of the created Tensor, which
            can be float32, float64, int32, int64.
        value(float|int): The constant value used to initialize the Tensor to be created.
        input_dim_idx(int): When the value is 0 and the input is a LoDTensor, the output_dim_idx
            dimension of the created Tensor is set to the batch_size value of the input.
            The default value is 0.
        output_dim_idx(int): Used to specify which dimension of the created Tensor should be set
            to the batch size of the input Tensor. The default value is 0.
        force_cpu(bool): data should be on CPU if it's true, default value is False.

    Returns:
        Variable: Tensor which will be created according to dtype.

    Examples:

        .. code-block:: python

             import paddle.fluid as fluid
             like = fluid.layers.fill_constant(shape=[1,2], value=10, dtype='int64') #like=[[10, 10]]
             data = fluid.layers.fill_constant_batch_size_like(
                    input=like, shape=[1], value=0, dtype='int64') #like=[[10, 10]] data=[0]

    """
    if in_dygraph_mode():
        if not isinstance(dtype, core.VarDesc.VarType):
            dtype = convert_np_dtype_to_dtype_(dtype)

        place = _current_expected_place()
        if force_cpu:
            place = core.CPUPlace()
        out = _C_ops.full_batch_size_like(
            input, shape, dtype, value, input_dim_idx, output_dim_idx, place
        )
        out.stop_gradient = True
        return out

    helper = LayerHelper("fill_constant_batch_size_like", **locals())
    out = helper.create_variable_for_type_inference(dtype=dtype)
    attrs = {
        'shape': shape,
        'dtype': out.dtype,
        'value': float(value),
        'input_dim_idx': input_dim_idx,
        'output_dim_idx': output_dim_idx,
        'force_cpu': force_cpu,
    }
    if convert_dtype(dtype) in ['int64', 'int32']:
        attrs['str_value'] = str(int(value))
    else:
        attrs['str_value'] = str(float(value))
    helper.append_op(
        type='fill_constant_batch_size_like',
        inputs={'Input': input},
        outputs={'Out': [out]},
        attrs=attrs,
    )
    out.stop_gradient = True
    return out


def argmin(x, axis=0):
    """
        :alias_main: paddle.argmin
        :alias: paddle.argmin,paddle.tensor.argmin,paddle.tensor.search.argmin
        :old_api: paddle.fluid.layers.argmin

    **argmin**

    This OP computes the indices of the minimum elements of the input
    tensor along the provided axis.

    Args:
        x(Variable): An input N-D Tensor with type float32, float64, int16,
            int32, int64, uint8.
        axis(int, optional): Axis to compute indices along. The effective range
            is [-R, R), where R is Rank(x). When axis<0, it works the same way
            as axis+R. Default is 0.

    Returns:
        Variable: A Tensor with data type int64.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import numpy as np

            in1 = np.array([[[5,8,9,5],
                            [0,0,1,7],
                            [6,9,2,4]],
                            [[5,2,4,2],
                            [4,7,7,9],
                            [1,7,0,6]]])
            with fluid.dygraph.guard():
                x = fluid.dygraph.to_variable(in1)
                out1 = fluid.layers.argmin(x=x, axis=-1)
                out2 = fluid.layers.argmin(x=x, axis=0)
                out3 = fluid.layers.argmin(x=x, axis=1)
                out4 = fluid.layers.argmin(x=x, axis=2)
                print(out1.numpy())
                # [[0 0 2]
                #  [1 0 2]]
                print(out2.numpy())
                # [[0 1 1 1]
                #  [0 0 0 0]
                #  [1 1 1 0]]
                print(out3.numpy())
                # [[1 1 1 2]
                #  [2 0 2 0]]
                print(out4.numpy())
                # [[0 0 2]
                #  [1 0 2]]
    """
    check_variable_and_dtype(
        x,
        'x',
        ['float32', 'float64', 'uint8', 'int16', 'int32', 'int64'],
        'argmin',
    )
    helper = LayerHelper("arg_min", **locals())
    out = helper.create_variable_for_type_inference(VarDesc.VarType.INT64)
    helper.append_op(
        type='arg_min',
        inputs={'X': x},
        outputs={'Out': [out]},
        attrs={'axis': axis},
    )
    out.stop_gradient = True
    return out


def argmax(x, axis=0):
    """
    **argmax**

    This OP computes the indices of the maximum elements of the input
    tensor along the provided axis.

    Args:
        x(Variable): An input N-D Tensor with type float32, float64, int16,
            int32, int64, uint8.
        axis(int, optional): Axis to compute indices along. The effective range
            is [-R, R), where R is Rank(x). When axis<0, it works the same way
            as axis+R. Default is 0.

    Returns:
        Variable: A Tensor with data type int64.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import numpy as np

            in1 = np.array([[[5,8,9,5],
                            [0,0,1,7],
                            [6,9,2,4]],
                            [[5,2,4,2],
                            [4,7,7,9],
                            [1,7,0,6]]])
            with fluid.dygraph.guard():
                x = fluid.dygraph.to_variable(in1)
                out1 = fluid.layers.argmax(x=x, axis=-1)
                out2 = fluid.layers.argmax(x=x, axis=0)
                out3 = fluid.layers.argmax(x=x, axis=1)
                out4 = fluid.layers.argmax(x=x, axis=2)
                print(out1.numpy())
                # [[2 3 1]
                #  [0 3 1]]
                print(out2.numpy())
                # [[0 0 0 0]
                #  [1 1 1 1]
                #  [0 0 0 1]]
                print(out3.numpy())
                # [[2 2 0 1]
                #  [0 1 1 1]]
                print(out4.numpy())
                # [[2 3 1]
                #  [0 3 1]]
    """
    check_variable_and_dtype(
        x,
        'x',
        ['float32', 'float64', 'uint8', 'int16', 'int32', 'int64'],
        'argmax',
    )
    helper = LayerHelper("arg_max", **locals())
    out = helper.create_variable_for_type_inference(VarDesc.VarType.INT64)
    helper.append_op(
        type='arg_max',
        inputs={'X': x},
        outputs={'Out': [out]},
        attrs={'axis': axis},
    )
    out.stop_gradient = True
    return out


def zeros(shape, dtype, force_cpu=False, name=None):
    """
    The OP creates a tensor of specified :attr:`shape` and :attr:`dtype`, and fills it with 0.
    Its :attr:`stop_gradient` will be set to True to stop gradient computation.

    Parameters:
        shape(tuple|list|Tensor): Shape of output Tensor, the data type of ``shape`` is int32 or int64.
        dtype (np.dtype|str): Data type of output Tensor, it supports
            bool, float16, float32, float64, int32 and int64.
        force_cpu (bool, optional): Whether force to store the output Tensor in CPU memory.
            If :attr:`force_cpu` is False, the output Tensor will be stored in running device memory.
            Default: False.
        name(str, optional): The default value is None.  Normally there is no need for user to set this
            property.  For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Tensor: A tensor of data type :attr:`dtype` with shape :attr:`shape` and all elements set to 0.

    Examples:
        .. code-block:: python

          import paddle.fluid as fluid
          data = fluid.layers.zeros(shape=[3, 2], dtype='float32') # [[0., 0.], [0., 0.], [0., 0.]]

          # shape is a Tensor
          shape = fluid.layers.fill_constant(shape=[2], dtype='int32', value=2)
          data1 = fluid.layers.zeros(shape=shape, dtype='int32') #[[0, 0], [0, 0]]
    """
    return fill_constant(value=0.0, **locals())