#   Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import numpy
import warnings

from ..layer_helper import LayerHelper
from ..framework import (
    _current_expected_place,
    convert_np_dtype_to_dtype_,
    _non_static_mode,
    _varbase_creator,
    _in_legacy_dygraph,
    in_dygraph_mode,
)
from ..framework import Variable
from ..core import VarDesc
from .. import core
from .layer_function_generator import templatedoc
from . import utils
from ..data_feeder import (
    check_variable_and_dtype,
    check_type,
    check_dtype,
    convert_dtype,
)
from paddle.utils import deprecated

from .utils import check_shape
from paddle import _C_ops, _legacy_C_ops

__all__ = [
    'cast',
    'tensor_array_to_tensor',
    'concat',
    'sums',
    'assign',
    'fill_constant_batch_size_like',
    'fill_constant',
    'argmin',
    'argmax',
    'zeros',
]


def cast(x, dtype):
    """

    This OP takes in the Tensor :attr:`x` with :attr:`x.dtype` and casts it
    to the output with :attr:`dtype`. Casting to the same dtype as the input
    is a no-op, but it is allowed.

    Args:
        x(Tensor): An input N-D Tensor with data type bool, float16,
            float32, float64, int32, int64, uint8.
        dtype(np.dtype|str): Data type of the output:
            bool, float16, float32, float64, int8, int32, int64, uint8.

    Returns:
        Tensor: A Tensor with the same shape as the input, with data type :attr:`dtype`.

    Examples:
        .. code-block:: python

            import paddle

            x = paddle.to_tensor([2, 3, 4], 'float64')
            y = paddle.cast(x, 'uint8')
    """
    if in_dygraph_mode():
        if not isinstance(dtype, core.VarDesc.VarType):
            dtype = convert_np_dtype_to_dtype_(dtype)
        return _C_ops.cast(x, dtype)

    if _non_static_mode():
        if not isinstance(dtype, core.VarDesc.VarType):
            dtype = convert_np_dtype_to_dtype_(dtype)
        out = _legacy_C_ops.cast(x, 'in_dtype', x.dtype, 'out_dtype', dtype)
        return out

    check_variable_and_dtype(
        x,
        'x',
        [
            'bool',
            'float16',
            'float32',
            'float64',
            'int16',
            'int32',
            'int64',
            'uint8',
            'uint16',
        ],
        'cast',
    )
    check_dtype(
        dtype,
        'dtype',
        [
            'bool',
            'float16',
            'float32',
            'float64',
            'int8',
            'int16',
            'int32',
            'int64',
            'uint8',
            'uint16',
        ],
        'cast',
    )

    helper = LayerHelper('cast', **locals())
    out = helper.create_variable_for_type_inference(
        dtype=dtype, stop_gradient=x.stop_gradient
    )
    helper.append_op(
        type='cast',
        inputs={'X': [x]},
        outputs={'Out': [out]},
        attrs={'in_dtype': x.dtype, 'out_dtype': out.dtype},
    )
    return out


def concat(input, axis=0, name=None):
    """
    This OP concatenates the input Tensors along the specified axis.

    Args:
        input(list|tuple|Tensor): ``input`` can be a Tensor, or a list or tuple of Tensors, with data type
            bool, float16, float32, float64, int32, int64. All the Tensors in ``input`` must have the same data type.
        axis(int|Tensor, optional): Specify the axis to operate on the input Tensors.
            It's a scalar with data type int or a Tensor with shape [1] and data type int32 or int64.
            The effective range is [-R, R), where R is Rank(x). When ``axis < 0``, it works the same way
            as ``axis+R``. Default is 0.
        name (str, optional): The default value is None. Normally there is no
            need for user to set this property. For more information, please
            refer to :ref:`api_guide_Name`.

    Returns:
        Tensor: A Tensor with the same data type as ``input``.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import numpy as np

            in1 = np.array([[1, 2, 3],
                            [4, 5, 6]])
            in2 = np.array([[11, 12, 13],
                            [14, 15, 16]])
            in3 = np.array([[21, 22],
                            [23, 24]])
            with fluid.dygraph.guard():
                x1 = fluid.dygraph.to_variable(in1)
                x2 = fluid.dygraph.to_variable(in2)
                x3 = fluid.dygraph.to_variable(in3)
                # When the axis is negative, the real axis is (axis + Rank(x)).
                # As follows, axis is -1, Rank(x) is 2, the real axis is 1
                out1 = fluid.layers.concat(input=[x1, x2, x3], axis=-1)
                out2 = fluid.layers.concat(input=[x1, x2], axis=0)
                print(out1.numpy())
                # [[ 1  2  3 11 12 13 21 22]
                #  [ 4  5  6 14 15 16 23 24]]
                print(out2.numpy())
                # [[ 1  2  3]
                #  [ 4  5  6]
                #  [11 12 13]
                #  [14 15 16]]
    """

    if in_dygraph_mode():
        if isinstance(axis, Variable):
            axis = axis.numpy()
            axis = axis.item(0)
        if not isinstance(input, Variable):
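            # Filter out tensors that have a zero-sized dimension; they
            # contribute nothing to the concatenation result.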
            input = [t for t in input if t.shape.count(0) == 0]
        out = _C_ops.concat(input, axis)
        return out

    if _in_legacy_dygraph():
        if isinstance(axis, Variable):
            axis = axis.numpy()
            axis = axis.item(0)
        if not isinstance(input, Variable):
            input = [t for t in input if t.shape.count(0) == 0]
        out = _varbase_creator()
        _legacy_C_ops.concat(input, out, 'axis', axis)
        return out

    check_type(input, 'input', (list, tuple, Variable), 'concat')
    if not isinstance(input, Variable):
        for id, x in enumerate(input):
            check_variable_and_dtype(
                x,
                'input[' + str(id) + ']',
                ['bool', 'float16', 'float32', 'float64', 'int32', 'int64'],
                'concat',
            )
            if x.dtype != input[0].dtype:
                raise TypeError(
                    "All the Tensors in the input must have the same data type."
                )
    else:
        input = [input]
    check_type(axis, 'axis', (int, Variable), 'concat')

    if isinstance(axis, Variable):
        check_dtype(
            axis.dtype,
            'axis',
            ['int32', 'int64'],
            'concat',
            "The data type of axis must be int32 or int64 when axis is a Tensor",
        )

    helper = LayerHelper('concat', **locals())
    out = helper.create_variable_for_type_inference(dtype=helper.input_dtype())

    if input[0].desc.type() == core.VarDesc.VarType.LOD_TENSOR_ARRAY:
        # NOTE(liym27): Don't remove this if branch!
        # This feature is supported for Dynamic-to-Static, because after transformed, the type of inputs[0]
        # is LOD_TENSOR_ARRAY in some scenarios. And this feature can be used in static mode.

        assert len(input) == 1, (
            "If the elements of 'input' in concat are Variable(LoDTensorArray), "
            "number of the elements must be 1, but received %s." % len(input)
        )
        out_index = helper.create_variable_for_type_inference(dtype="int32")
        helper.append_op(
            type='tensor_array_to_tensor',
            inputs={'X': input[0]},
            outputs={'Out': [out], 'OutIndex': [out_index]},
            attrs={'axis': axis, 'use_stack': False},
        )
    else:
        inputs = {'X': input}
        attrs = {}
        if isinstance(axis, Variable):
            axis.stop_gradient = True
        attrs['axis'] = axis

        helper.append_op(
            type='concat', inputs=inputs, outputs={'Out': [out]}, attrs=attrs
        )
    return out


def tensor_array_to_tensor(input, axis=1, name=None, use_stack=False):
    r"""
    This function concatenates or stacks all tensors in the input LoDTensorArray
    along the axis mentioned and returns that as the output.

    For Example:

    .. code-block:: text

        Case 1:

            Given:

                input.data = {[[0.6, 0.1, 0.3],
                               [0.5, 0.3, 0.2]],
                              [[1.3],
                               [1.8]],
                              [[2.3, 2.1],
                               [2.5, 2.4]]}

                axis = 1, use_stack = False

            Then:

                output.data = [[0.6, 0.1, 0.3, 1.3, 2.3, 2.1],
                               [0.5, 0.3, 0.2, 1.8, 2.5, 2.4]]

                output_index.data = [3, 1, 2]

        Case 2:

            Given:

                input.data = {[[0.6, 0.1],
                               [0.5, 0.3]],
                              [[0.3, 1.3],
                               [0.2, 1.8]],
                              [[2.3, 2.1],
                               [2.5, 2.4]]}

                axis = 1, use_stack = True

            Then:

                output.data = [[[0.6, 0.1]
                                [0.3, 1.3]
                                [2.3, 2.1],
                               [[0.5, 0.3]
                                [0.2, 1.8]
                                [2.5, 2.4]]]

                output_index.data = [2, 2, 2]

    Args:
        input(Variable): A LoDTensorArray variable.
        axis(int): The axis along which the tensors in :attr:`input` will be
            concatenated or stacked.
        name(str|None): A name for this layer (optional). If set to None, the
            layer will be named automatically.
        use_stack(bool): Whether to act as stack_op (True) or concat_op (False).
            For stack mode, all tensors in the tensor array must have the same shape.

    Returns:
        Variable: The concatenated or stacked tensor variable.
        Variable: A 1-D tensor variable with int32 data type. The data in this \
            tensor contains all input tensors' sizes along the given axis.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import numpy as np
            x0 = fluid.layers.assign(np.random.rand(2, 2).astype("float32"))
            x1 = fluid.layers.assign(np.random.rand(2, 2).astype("float32"))
            i = fluid.layers.fill_constant(shape=[1], dtype="int64", value=0)
            array = fluid.layers.create_array(dtype='float32')
            fluid.layers.array_write(x0, i, array)
            fluid.layers.array_write(x1, i + 1, array)
            output, output_index = fluid.layers.tensor_array_to_tensor(input=array)
    """
    if _non_static_mode():
        assert isinstance(
            input, list
        ), "The 'input' in tensor_array_to_tensor must be list"
        from .nn import concat
        from ..dygraph import to_variable
        from paddle import stack

        op = stack if use_stack else concat
        res = op(input, axis=axis)
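        # Record each input tensor's extent along `axis`, mirroring the
        # OutIndex output of the static-graph tensor_array_to_tensor op.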
        sizes = to_variable(
            numpy.array(list(map(lambda x: int(x.shape[axis]), input)))
        )
        return res, sizes

    check_type(input, 'input', (list, Variable), 'tensor_array_to_tensor')
    if isinstance(input, list):
        for i, input_x in enumerate(input):
            check_type(
                input_x,
                'input[' + str(i) + ']',
                Variable,
                'tensor_array_to_tensor',
            )
    helper = LayerHelper('tensor_array_to_tensor', **locals())
    out = helper.create_variable_for_type_inference(dtype=helper.input_dtype())
    out_index = helper.create_variable_for_type_inference(dtype="int32")
    helper.append_op(
        type='tensor_array_to_tensor',
        inputs={'X': input},
        outputs={'Out': [out], 'OutIndex': [out_index]},
        attrs={'axis': axis, 'use_stack': use_stack},
    )
    return out, out_index


def sums(input, out=None):
    r"""
    This function computes the element-wise sum of multiple input Tensors.

    - Case 1, sum of 3 Tensors

    .. code-block:: text

        # Input Tensors
        x0.shape = [2, 3]
        x0.data = [[1., 2., 3.],
                   [4., 5., 6.]]
        x1.shape = [2, 3]
        x1.data = [[10., 20., 30.],
                   [40., 50., 60.]]
        x2.shape = [2, 3]
        x2.data = [[100., 200., 300.],
                   [400., 500., 600.]]

        # Output Tensor
        out.shape = [2, 3]
        out.data = [[111., 222., 333.],
                    [444., 555., 666.]]

    Args:
        input (list): A list of Variables which hold input Tensors with the same
            data type and shape. Optional data types are: float32, float64, int32, int64.
        out (Variable, optional): Output Tensor. It can be any existing Variable.
            The default value is None, then a new Variable will be created and returned.

    Returns:
        Variable: The sum of the inputs. The shape and data type are the same as the input. \
            If :code:`out` is not None, the returned value is :code:`out` .

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid

            x0 = fluid.layers.fill_constant(shape=[16, 32], dtype='int64', value=1)
            x1 = fluid.layers.fill_constant(shape=[16, 32], dtype='int64', value=2)
            x2 = fluid.layers.fill_constant(shape=[16, 32], dtype='int64', value=3)
            x3 = fluid.layers.fill_constant(shape=[16, 32], dtype='int64', value=0)

            # Sum of multiple Tensors, the result is stored to a new Variable sum0 (sum0=x0+x1+x2, the value is [[6, ..., 6], ..., [6, ..., 6]])
            sum0 = fluid.layers.sums(input=[x0, x1, x2])

            # Sum of multiple Tensors, sum1 and x3 represents the same Variable (x3=x0+x1+x2, the value is [[6, ..., 6], ..., [6, ..., 6]])
            sum1 = fluid.layers.sums(input=[x0, x1, x2], out=x3)
    """
    check_type(input, 'input', (Variable, tuple, list), 'sums')
    if isinstance(input, list) or isinstance(input, tuple):
        for input_section in input:
            check_variable_and_dtype(
                input_section,
                "input",
                ['float16', 'float32', 'float64', 'int32', 'int64'],
                'sums',
            )
    else:
        check_variable_and_dtype(
            input,
            "input",
            ['float16', 'float32', 'float64', 'int32', 'int64'],
            'sums',
        )

    helper = LayerHelper('sum', **locals())
    if out is None:
        out = helper.create_variable_for_type_inference(
            dtype=helper.input_dtype()
        )
    else:
        check_variable_and_dtype(
            out, "out", ['float32', 'float64', 'int32', 'int64'], 'sums'
        )

    helper.append_op(
        type='sum',
        inputs={'X': input},
        outputs={'Out': out},
        attrs={'use_mkldnn': False},
    )
    return out


def assign(input, output=None):
    """

    The OP copies the :attr:`input` to the :attr:`output`.

    Parameters:
        input (Tensor|numpy.ndarray|list|tuple|scalar): A tensor, numpy ndarray, tuple/list of scalar,
            or scalar. Its data type supports float16, float32, float64, int32, int64, and bool.
            Note: the float64 data will be converted to float32 because of current platform protobuf
            data limitation.
        output (Tensor, optional): A tensor. If :attr:`output` is None, a new tensor will
            be created as :attr:`output`. Default: None.

    Returns:
        Tensor: A tensor with the same shape, data type and value as :attr:`input`.

    Examples:
        .. code-block:: python

          import paddle
          import numpy as np
          data = paddle.full(shape=[3, 2], fill_value=2.5, dtype='float64') # [[2.5, 2.5], [2.5, 2.5], [2.5, 2.5]]
          array = np.array([[1, 1],
                            [3, 4],
                            [1, 3]]).astype(np.int64)
          result1 = paddle.zeros(shape=[3, 3], dtype='float32')
          paddle.assign(array, result1) # result1 = [[1, 1], [3, 4], [1, 3]]
          result2 = paddle.assign(data)  # result2 = [[2.5, 2.5], [2.5, 2.5], [2.5, 2.5]]
          result3 = paddle.assign(np.array([[2.5, 2.5], [2.5, 2.5], [2.5, 2.5]], dtype='float32')) # result3 = [[2.5, 2.5], [2.5, 2.5], [2.5, 2.5]]
    """
    helper = LayerHelper('assign', **locals())
    check_type(
        input,
        'input',
        (Variable, numpy.ndarray, list, tuple, float, int, bool),
        'assign',
    )
    is_inplace = True if output is not None else False

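    # Normalize python scalars and lists/tuples to numpy arrays so the
    # numpy.ndarray branch below can handle them uniformly.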
    if numpy.isscalar(input) and not isinstance(input, str):
        input = numpy.array([input])
    elif isinstance(input, (list, tuple)):
        input = numpy.array(input)
    # NOTE(Aurelius84): Why we judge core.VarBase?
    # In case of @to_static, a VarBase can be as input of `assign`,
    # but _non_static_mode()==False under @to_static, which means
    # isinstance(VarBase, Variable) == False. It will cause return None
    # after this api.
    if isinstance(input, (Variable, core.VarBase)):
        if _non_static_mode():
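            # In eager mode, return a fresh tensor when no output is given;
            # otherwise write into `output` in place via assign_out_.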
            if in_dygraph_mode() and output is None:
                output = _C_ops.assign(input)
            elif in_dygraph_mode() and output is not None:
                _C_ops.assign_out_(input, output)
            else:
                if output is None:
                    if _in_legacy_dygraph():
                        output = core.VarBase()
                    else:
                        output = core.eager.Tensor()
                _legacy_C_ops.assign(input, output)
        else:
            check_dtype(
                input.dtype,
                'input',
                [
                    'float16',
                    'uint16',
                    'float32',
                    'float64',
                    'int32',
                    'int64',
                    'uint8',
                    'bool',
                ],
                'assign',
                '(When the type of input in assign is Variable.)',
            )
            if output is None:
                output = helper.create_variable_for_type_inference(
                    dtype=input.dtype
                )
            helper.append_op(
                type='assign', inputs={'X': [input]}, outputs={'Out': [output]}
            )
    elif isinstance(input, numpy.ndarray):
        # Not support [var, var, ...] currently.
        if len(input.shape) > 0 and any(isinstance(x, Variable) for x in input):
            raise TypeError(
                "Required type(input) numpy.ndarray, but found `list(Variable)` in input."
            )
        dtype = convert_np_dtype_to_dtype_(input.dtype)
        if dtype == VarDesc.VarType.FP64:
            # Setting FP64 numpy data is not supported in Paddle, so we
            # use FP32 here
            warnings.warn(
                "paddle.assign doesn't support float64 input now due "
                "to current platform protobuf data limitation, we convert "
                "it to float32"
            )
            dtype = VarDesc.VarType.FP32
        if dtype == VarDesc.VarType.BOOL:
            value_name = "bool_values"
            values = [int(v) for v in input.flat]
        elif dtype == VarDesc.VarType.FP32:
            value_name = "fp32_values"
            values = [float(v) for v in input.flat]
        elif dtype == VarDesc.VarType.INT32:
            value_name = "int32_values"
            values = [int(v) for v in input.flat]
        elif dtype == VarDesc.VarType.INT64:
            value_name = "int64_values"
            values = [int(v) for v in input.flat]
        else:
            raise TypeError(
                "When the type of 'input' in assign is numpy.ndarray, "
                "the data type of 'input' must be bool, float32, int32 or int64, but "
                "received %s." % convert_dtype(dtype)
            )
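        # assign_value carries the data in the op's attributes, so very
        # large arrays would bloat the serialized program; cap the size.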
        if input.size > 1024 * 1024:
            raise ValueError(
                "The size of input is too big. Please consider "
                "saving it to file and 'load_op' to load it"
            )
        if in_dygraph_mode():
            if output is None:
                output = zeros(list(input.shape), dtype)
            _C_ops.assign_value_(
                output,
                list(input.shape),
                dtype,
                values,
                _current_expected_place(),
            )
        elif _in_legacy_dygraph():
            if output is None:
                output = core.VarBase()
            _legacy_C_ops.assign_value(
                output,
                'shape',
                list(input.shape),
                'dtype',
                dtype,
                value_name,
                values,
            )
        else:
            if output is None:
                output = helper.create_variable_for_type_inference(
                    dtype=input.dtype
                )
            helper.append_op(
                type='assign_value',
                outputs={'Out': [output]},
                attrs={
                    'dtype': dtype,
                    'shape': list(input.shape),
                    value_name: values,
                },
            )

    if is_inplace and _non_static_mode():
        output._bump_inplace_version()

    return output


def fill_constant(shape, dtype, value, force_cpu=False, out=None, name=None):
    """

    This OP creates a Tensor with specified `shape` and `dtype`, and
    initializes it with a constant specified by `value`.

    The attribute `stop_gradient` of the created Tensor is set to True.

    Args:
        shape(list|tuple|Tensor): Shape of the output Tensor, the data type of ``shape`` is int32 or int64.
            If ``shape`` is a list or tuple, the elements of it should be integers or Tensors with shape [1].
            If ``shape`` is a Tensor, it should be a 1-D Tensor with data type int32 or int64.
        dtype(np.dtype|str): Data type of the output Tensor which can
            be float16, float32, float64, uint8, int16, int32, int64.
        value(bool|float|int|Tensor): The constant value used to initialize
            the Tensor to be created. If ``value`` is a Tensor, it should be a 1-D Tensor.
        force_cpu(bool, optional): If True, the output Tensor is placed on CPU. Default is False.
        out(Tensor, optional): Optional output which can be any created
            Tensor that meets the requirements to store the result of the operation.
            If ``out`` is None, a new Tensor will be created to store the result.
        name(str, optional): The default value is None.  Normally there is no need for user to set this
            property.  For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Tensor: Tensor which is created according to shape and dtype.

    Examples:
        .. code-block:: python

          import paddle.fluid as fluid
          # attr shape is a list which doesn't contain  Tensor.
          data1 = fluid.layers.fill_constant(shape=[2,1], value=0, dtype='int64') # data1=[[0],[0]]
          data2 = fluid.layers.fill_constant(shape=[2,1], value=5, dtype='int64', out=data1)
          # data1=[[5], [5]] data2=[[5], [5]]

          # attr shape is a list which contains Tensor.
          positive_2 = fluid.layers.fill_constant([1], "int32", 2)
          data3 = fluid.layers.fill_constant(shape=[1, positive_2], dtype='float32', value=1.5) # data3=[[1.5, 1.5]]

          # attr shape is a Tensor.
          shape = fluid.layers.fill_constant([2], "int32", 2) # shape=[2,2]
          data4 = fluid.layers.fill_constant(shape=shape, dtype='bool', value=True) # data4=[[True,True],[True,True]]

          # attr value is a Tensor.
          val = fluid.layers.fill_constant([1], "float32", 2.0) # val=[2.0]
          data5 = fluid.layers.fill_constant(shape=[2,1], value=val, dtype='float32') #data5=[[2.0],[2.0]]
    """

    attrs = {'force_cpu': force_cpu}
    dtype = convert_dtype(dtype)
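    # Record the value as a string attribute alongside the numeric one;
    # presumably this preserves exact integer values (e.g. large int64)
    # that a float attribute could not represent.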
    if not isinstance(value, Variable):
        if dtype in ['uint8', 'int16', 'int32', 'int64']:
            attrs['str_value'] = str(int(value))
            attrs['value'] = int(value)
        else:
            attrs['str_value'] = str(float(value))
            attrs['value'] = float(value)

    if in_dygraph_mode():
        place = _current_expected_place()
        if force_cpu:
            place = core.CPUPlace()
        if isinstance(shape, (list, tuple)):
            shape = utils.convert_shape_to_list(shape)

        if not isinstance(dtype, core.VarDesc.VarType):
            dtype = convert_np_dtype_to_dtype_(dtype)

        if out is None:
            out = _C_ops.full(shape, float(value), dtype, place)
            out.stop_gradient = True
            return out

        if out is not None:
            # final state mode is support out is not None.
            _C_ops.full_(out, shape, float(value), dtype, place)
            out.stop_gradient = True
            return out

    if _in_legacy_dygraph():
        shape = utils.convert_shape_to_list(shape)
        if out is None:
            out = _varbase_creator(dtype=dtype)

        if isinstance(value, Variable):
            if dtype in ['uint8', 'int16', 'int32', 'int64']:
                attrs['str_value'] = str(int(value.numpy().item(0)))
            else:
                attrs['str_value'] = str(float(value.numpy().item(0)))

        _legacy_C_ops.fill_constant(
            out,
            'value',
            float(value),
            'force_cpu',
            force_cpu,
            'dtype',
            out.dtype,
            'str_value',
            attrs['str_value'],
            'shape',
            shape,
        )
        out.stop_gradient = True
        return out

    helper = LayerHelper("fill_constant", **locals())
    inputs = {}
    if isinstance(value, Variable):
        if convert_dtype(value.dtype) != dtype:
            value = cast(value, dtype)
        inputs['ValueTensor'] = value

    check_shape(shape)
    check_dtype(
        dtype,
        'dtype',
        [
            'bool',
            'float16',
            'float32',
            'float64',
            'uint8',
            'int16',
            'int32',
            'int64',
            'complex64',
            'complex128',
        ],
        'fill_constant',
    )
    check_type(shape, 'shape', (Variable, list, tuple), 'fill_constant')

    if out is not None:
        check_variable_and_dtype(
            out, 'out', [convert_dtype(dtype)], 'fill_constant'
        )

    helper = LayerHelper("fill_constant", **locals())
    utils.get_shape_tensor_inputs(
        inputs=inputs, attrs=attrs, shape=shape, op_type='fill_constant'
    )

    if out is None:
        out = helper.create_variable_for_type_inference(dtype=dtype)
    attrs['dtype'] = out.dtype
    helper.append_op(
        type='fill_constant',
        inputs=inputs,
        outputs={'Out': [out]},
        attrs=attrs,
        stop_gradient=True,
    )
    out.stop_gradient = True
    return out


@deprecated(since='1.8.0', update_to="paddle.fluid.layers.fill_constant")
@templatedoc()
def fill_constant_batch_size_like(
    input,
    shape,
    dtype,
    value,
    input_dim_idx=0,
    output_dim_idx=0,
    force_cpu=False,
):
    """
    This OP creates a Tensor according to the shape and dtype, and initializes
    the Tensor with the constants provided in ``value``. When the input is a
    LoDTensor and input_dim_idx is 0, the output_dim_idx dimension of the output
    is set to the batch size of the input. The stop_gradient attribute of the
    created Tensor is False by default.

    Args:
        input(Variable): Tensor whose data type is float32, float64, int32 or int64.
        shape(list): The shape of the Tensor to be created; the Tensor's shape may be changed
            according to the input.
        dtype(np.dtype|core.VarDesc.VarType|str): The data type of the created Tensor, which
            can be float32, float64, int32, int64.
        value(float|int): The constant value used to initialize the Tensor to be created.
        input_dim_idx(int): When the value is 0 and the input is a LoDTensor, the output_dim_idx
            dimension of the created Tensor is set to the batch_size value of the input.
            The default value is 0.
        output_dim_idx(int): Specifies which dimension of the created Tensor is set to the
            batch_size value of the input Tensor. The default value is 0.
        force_cpu(bool): If True, the output Tensor is placed on CPU. Default is False.

    Returns:
        Variable: Tensor which will be created according to dtype.

    Examples:

        .. code-block:: python

             import paddle.fluid as fluid
             like = fluid.layers.fill_constant(shape=[1,2], value=10, dtype='int64') #like=[[10, 10]]
             data = fluid.layers.fill_constant_batch_size_like(
                    input=like, shape=[1], value=0, dtype='int64') #like=[[10, 10]] data=[0]

    """
    if in_dygraph_mode():
        if not isinstance(dtype, core.VarDesc.VarType):
            dtype = convert_np_dtype_to_dtype_(dtype)

        place = _current_expected_place()
        if force_cpu:
            place = core.CPUPlace()
        out = _C_ops.full_batch_size_like(
            input, shape, dtype, value, input_dim_idx, output_dim_idx, place
        )
        out.stop_gradient = True
        return out

    helper = LayerHelper("fill_constant_batch_size_like", **locals())
    out = helper.create_variable_for_type_inference(dtype=dtype)
    attrs = {
        'shape': shape,
        'dtype': out.dtype,
        'value': float(value),
        'input_dim_idx': input_dim_idx,
        'output_dim_idx': output_dim_idx,
        'force_cpu': force_cpu,
    }
    if convert_dtype(dtype) in ['int64', 'int32']:
        attrs['str_value'] = str(int(value))
    else:
        attrs['str_value'] = str(float(value))
    helper.append_op(
        type='fill_constant_batch_size_like',
        inputs={'Input': input},
        outputs={'Out': [out]},
        attrs=attrs,
    )
    out.stop_gradient = True
    return out


def argmin(x, axis=0):
    """
        :alias_main: paddle.argmin
        :alias: paddle.argmin,paddle.tensor.argmin,paddle.tensor.search.argmin
        :old_api: paddle.fluid.layers.argmin

    **argmin**

    This OP computes the indices of the minimum elements of the input tensor
    along the provided axis.

    Args:
        x(Variable): An input N-D Tensor with type float32, float64, int16,
            int32, int64, uint8.
        axis(int, optional): Axis to compute indices along. The effective range
            is [-R, R), where R is Rank(x). when axis<0, it works the same way
            as axis+R. Default is 0.

    Returns:
        Variable: A Tensor with data type int64.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import numpy as np

            in1 = np.array([[[5,8,9,5],
                            [0,0,1,7],
                            [6,9,2,4]],
                            [[5,2,4,2],
                            [4,7,7,9],
                            [1,7,0,6]]])
            with fluid.dygraph.guard():
                x = fluid.dygraph.to_variable(in1)
                out1 = fluid.layers.argmin(x=x, axis=-1)
                out2 = fluid.layers.argmin(x=x, axis=0)
                out3 = fluid.layers.argmin(x=x, axis=1)
                out4 = fluid.layers.argmin(x=x, axis=2)
                print(out1.numpy())
                # [[0 0 2]
                #  [1 0 2]]
                print(out2.numpy())
                # [[0 1 1 1]
                #  [0 0 0 0]
                #  [1 1 1 0]]
                print(out3.numpy())
                # [[1 1 1 2]
                #  [2 0 2 0]]
                print(out4.numpy())
                # [[0 0 2]
                #  [1 0 2]]
    """
    check_variable_and_dtype(
        x,
        'x',
        ['float32', 'float64', 'uint8', 'int16', 'int32', 'int64'],
        'argmin',
    )
    helper = LayerHelper("arg_min", **locals())
    out = helper.create_variable_for_type_inference(VarDesc.VarType.INT64)
    helper.append_op(
        type='arg_min',
        inputs={'X': x},
        outputs={'Out': [out]},
        attrs={'axis': axis},
    )
    out.stop_gradient = True
    return out


def argmax(x, axis=0):
    """
    **argmax**

    This OP computes the indices of the maximum elements of the input tensor
    along the provided axis.

    Args:
        x(Variable): An input N-D Tensor with type float32, float64, int16,
            int32, int64, uint8.
        axis(int, optional): Axis to compute indices along. The effective range
            is [-R, R), where R is Rank(x). when axis<0, it works the same way
            as axis+R. Default is 0.

    Returns:
        Variable: A Tensor with data type int64.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import numpy as np

            in1 = np.array([[[5,8,9,5],
                            [0,0,1,7],
                            [6,9,2,4]],
                            [[5,2,4,2],
                            [4,7,7,9],
                            [1,7,0,6]]])
            with fluid.dygraph.guard():
                x = fluid.dygraph.to_variable(in1)
                out1 = fluid.layers.argmax(x=x, axis=-1)
                out2 = fluid.layers.argmax(x=x, axis=0)
                out3 = fluid.layers.argmax(x=x, axis=1)
                out4 = fluid.layers.argmax(x=x, axis=2)
                print(out1.numpy())
                # [[2 3 1]
                #  [0 3 1]]
                print(out2.numpy())
                # [[0 0 0 0]
                #  [1 1 1 1]
                #  [0 0 0 1]]
                print(out3.numpy())
                # [[2 2 0 1]
                #  [0 1 1 1]]
                print(out4.numpy())
                # [[2 3 1]
                #  [0 3 1]]
    """
    check_variable_and_dtype(
        x,
        'x',
        ['float32', 'float64', 'uint8', 'int16', 'int32', 'int64'],
        'argmax',
    )
    helper = LayerHelper("arg_max", **locals())
    out = helper.create_variable_for_type_inference(VarDesc.VarType.INT64)
    helper.append_op(
        type='arg_max',
        inputs={'X': x},
        outputs={'Out': [out]},
        attrs={'axis': axis},
    )
    out.stop_gradient = True
    return out


def zeros(shape, dtype, force_cpu=False, name=None):
    """
    The OP creates a tensor of specified :attr:`shape` and :attr:`dtype`, and fills it with 0.
    Its :attr:`stop_gradient` will be set to True to stop gradient computation.

    Parameters:
        shape(tuple|list|Tensor): Shape of output Tensor, the data type of ``shape`` is int32 or int64.
        dtype (np.dtype|str): Data type of output Tensor, it supports
            bool, float16, float32, float64, int32 and int64.
        force_cpu (bool, optional): Whether to force the output Tensor to be stored in CPU memory.
            If :attr:`force_cpu` is False, the output Tensor will be stored on the running device.
            Default: False.
        name(str, optional): The default value is None.  Normally there is no need for user to set this
            property.  For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Tensor: A tensor of data type :attr:`dtype` with shape :attr:`shape` and all elements set to 0.

    Examples:
        .. code-block:: python

          import paddle.fluid as fluid
          data = fluid.layers.zeros(shape=[3, 2], dtype='float32') # [[0., 0.], [0., 0.], [0., 0.]]

          # shape is a Tensor
          shape = fluid.layers.fill_constant(shape=[2], dtype='int32', value=2)
          data1 = fluid.layers.zeros(shape=shape, dtype='int32') #[[0, 0], [0, 0]]
    """
    return fill_constant(value=0.0, **locals())