# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
All layers related to the neural network.
"""
import os
import inspect
import warnings

import numpy as np

import paddle
from ..layer_helper import LayerHelper
from ..framework import (
    Variable,
    OpProtoHolder,
    dygraph_only,
    _dygraph_tracer,
    default_main_program,
    _varbase_creator,
    static_only,
    _global_flags,
    in_dygraph_mode,
)
from ..framework import _current_expected_place
from .. import dygraph_utils
from ..param_attr import ParamAttr
from .layer_function_generator import (
    autodoc,
    templatedoc,
    _generate_doc_string_,
)
from .tensor import concat, assign, fill_constant, zeros
from . import utils
from .. import unique_name
from .. import core
from ..data_feeder import (
    convert_dtype,
    check_variable_and_dtype,
    check_type,
    check_dtype,
)
from paddle.utils import deprecated
from paddle import _C_ops, _legacy_C_ops
from collections.abc import Iterable


__all__ = [
    'embedding',
    'autoincreased_step_counter',
]

OP_NAMEMAPPING = {
    'elementwise_max': 'maximum',
    'elementwise_min': 'minimum',
    'elementwise_pow': 'elementwise_pow',
    'elementwise_floordiv': 'floor_divide',
    'elementwise_add': 'add',
    'elementwise_sub': 'subtract',
    'elementwise_mul': 'multiply',
    'elementwise_div': 'divide',
    'elementwise_mod': 'remainder',
}
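
# A small illustration (added comment, not in the original source): in dygraph
# mode this mapping routes a legacy elementwise op name to its new _C_ops entry
# point, e.g. getattr(_C_ops, OP_NAMEMAPPING['elementwise_add']) is _C_ops.add.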


def _get_reduce_dim(dim, input):
    """
    Internal helper for reduce_sum, reduce_mean, and reduce_prod.
    It computes the ``reduce_all`` attribute value based on the given axis.
    """
    if dim is not None and not isinstance(dim, list):
        if isinstance(dim, (tuple, range)):
            dim = list(dim)
        elif isinstance(dim, int):
            dim = [dim]
        else:
            raise TypeError(
                "The type of dim must be int, list, tuple or range, but received {}".format(
                    type(dim)
                )
            )
    if dim is None:
        dim = []
    reduce_all = dim == [] or len(dim) == len(input.shape)

    return reduce_all, dim
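
# A hedged usage sketch for _get_reduce_dim (illustrative values, not from the
# original source); `input` only needs a `.shape` attribute here:
#
#     x = fluid.data(name='x', shape=[2, 3, 4], dtype='float32')
#     _get_reduce_dim(None, x)       # -> (True, [])
#     _get_reduce_dim(1, x)          # -> (False, [1])
#     _get_reduce_dim((0, 1, 2), x)  # -> (True, [0, 1, 2])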


@dygraph_only
def _elementwise_op_in_dygraph(
    x, y, axis=-1, act=None, use_mkldnn=False, op_name=None
):
    def is_inplace(op_name):
        return op_name[-1] == "_"

    if op_name not in OP_NAMEMAPPING or axis != -1:
        op = getattr(_legacy_C_ops, op_name)
        out = op(x, y, 'axis', axis, 'use_mkldnn', use_mkldnn)
    else:
        if in_dygraph_mode():
            op = getattr(
                _C_ops,
                OP_NAMEMAPPING[op_name] if not is_inplace(op_name) else op_name,
            )
            out = op(x, y)
    return dygraph_utils._append_activation_in_dygraph(
        out, act, use_mkldnn=use_mkldnn
    )
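
# A minimal sketch (assumed call pattern, not from the original source): with
# the default axis=-1, 'elementwise_add' dispatches to _C_ops.add via
# OP_NAMEMAPPING; a non-default axis falls back to the legacy op instead, e.g.
#
#     out = _elementwise_op_in_dygraph(x, y, op_name='elementwise_add')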


@deprecated(since="2.0.0", update_to="paddle.nn.functional.embedding")
def embedding(
    input,
    size,
    is_sparse=False,
    is_distributed=False,
    padding_idx=None,
    param_attr=None,
    dtype='float32',
):
    r"""
    :api_attr: Static Graph

    **WARNING:** This OP will be deprecated in a future release. It requires that
    the last dimension of the input Tensor shape be equal to 1. It is recommended
    to use :ref:`api_fluid_embedding` instead.

    This OP is used to look up the embedding vectors of the ids provided by
    :attr:`input` . It automatically constructs a 2D embedding matrix based on the
    input :attr:`size` (vocab_size, emb_size) and :attr:`dtype` .

    The shape of the output Tensor is generated by replacing the last dimension
    of the input Tensor shape with emb_size.

    **Note:** The id in :attr:`input` must satisfy :math:`0 <= id < size[0]` ,
    otherwise the program will throw an exception and exit.

    .. code-block:: text

        Case 1:

        input is a Tensor. padding_idx = -1
            input.data = [[[1], [3]], [[2], [4]], [[4], [127]]]
            input.shape = [3, 2, 1]
        Given size = [128, 16]
        output is a Tensor:
            out.shape = [3, 2, 16]
            out.data = [[[0.129435295, 0.244512452, ..., 0.436322452],
                        [0.345421456, 0.524563927, ..., 0.144534654]],

                        [[0.345249859, 0.124939536, ..., 0.194353745],
                        [0.945345345, 0.435394634, ..., 0.435345365]],

                        [[0.945345345, 0.435394634, ..., 0.435345365],
                        [0.0,         0.0,         ..., 0.0        ]]]  # padding data
        Since the input padding_idx is less than 0, it is automatically converted to padding_idx = -1 + 128 = 127
        It will pad all-zero data when the id is 127.

        Case 2:

        input is a LoDTensor with 1-level LoD. padding_idx = 0
            input.lod = [[2, 3]]
            input.data = [[1], [3], [2], [4], [0]]
            input.shape = [5, 1]
        Given size = [128, 16]
        output is a LoDTensor:
            out.lod = [[2, 3]]
            out.shape = [5, 16]
            out.data = [[0.129435295, 0.244512452, ..., 0.436322452],
                        [0.345421456, 0.524563927, ..., 0.144534654],
                        [0.345249859, 0.124939536, ..., 0.194353745],
                        [0.945345345, 0.435394634, ..., 0.435345365],
                        [0.0,         0.0,         ..., 0.0        ]]  # padding data
        It will pad all-zero data when the id is 0.

    Args:
        input(Variable): A Tensor or LoDTensor with type int64, which contains the id information.
            The last dimension of the Tensor shape must be equal to 1. The value of the input id should
            satisfy :math:`0 <= id < size[0]` .
        size(tuple|list): The shape of the lookup table parameter. It should have two elements, which
            indicate the size of the dictionary of embeddings and the size of each embedding vector respectively.
        is_sparse(bool): The flag indicating whether to use sparse update. This parameter only
            affects the performance of the backward gradient update. It is recommended to set it
            to True because sparse update is faster. However, some optimizers do not support sparse update,
            such as :ref:`api_fluid_optimizer_AdadeltaOptimizer` , :ref:`api_fluid_optimizer_AdamaxOptimizer` ,
            :ref:`api_fluid_optimizer_DecayedAdagradOptimizer` , :ref:`api_fluid_optimizer_FtrlOptimizer` ,
            :ref:`api_fluid_optimizer_LambOptimizer` and :ref:`api_fluid_optimizer_LarsMomentumOptimizer` .
            In these cases, is_sparse must be False. Default: False.
        is_distributed(bool): Whether to store the embedding matrix in a distributed manner. Only used
            in multi-machine distributed CPU training. Default: False.
        padding_idx(int|long|None): padding_idx needs to be in the interval [-vocab_size, vocab_size).
            If :math:`padding\_idx < 0`, the :math:`padding\_idx` will automatically be converted
            to :math:`vocab\_size + padding\_idx` . It will output all-zero padding data whenever lookup
            encounters :math:`padding\_idx` in id, and the padding data will not be updated while training.
            If set to None, it has no effect on the output. Default: None.
        param_attr(ParamAttr): To specify the weight parameter property. Default: None, which means the
            default weight parameter property is used. See usage for details in :ref:`api_fluid_ParamAttr` . In addition,
            user-defined or pre-trained word vectors can be loaded with the :attr:`param_attr` parameter.
            The local word vectors need to be transformed into numpy format, and the shape of the local word
            vectors should be consistent with :attr:`size` . Then :ref:`api_fluid_initializer_NumpyArrayInitializer`
            is used to load custom or pre-trained word vectors. See code example 2 for details.
        dtype(str|core.VarDesc.VarType): It refers to the data type of the output Tensor.
            It must be float32 or float64. Default: float32.

    Returns:
        Variable: Embedding Tensor or LoDTensor mapped by input. The data type is the same as :attr:`dtype` .

    Examples:
        .. code-block:: python

          import paddle.fluid as fluid
          import numpy as np
          import paddle
          paddle.enable_static()

          data = fluid.data(name='x', shape=[None, 1], dtype='int64')

          # example 1
          emb_1 = fluid.embedding(input=data, size=[128, 64])

          # example 2: load custom or pre-trained word vectors
          weight_data = np.random.random(size=(128, 100))  # word vectors with numpy format
          w_param_attrs = fluid.ParamAttr(
              name="emb_weight",
              learning_rate=0.5,
              initializer=paddle.nn.initializer.Assign(weight_data),
              trainable=True)
          emb_2 = fluid.layers.embedding(input=data, size=(128, 100), param_attr=w_param_attrs, dtype='float32')
    """

    helper = LayerHelper('embedding', **locals())
    check_variable_and_dtype(
        input, 'input', ['int64'], 'fluid.layers.embedding'
    )
    check_dtype(
        dtype,
        'dtype',
        ['uint16', 'float16', 'float32', 'float64'],
        'fluid.layers.embedding',
    )

    if is_distributed:
        is_distributed = False
        warnings.warn(
            "is_distributed is go out of use, `fluid.contrib.layers.sparse_embedding` is your needed"
        )

    remote_prefetch = is_sparse

    w = helper.create_parameter(
        attr=helper.param_attr, shape=size, dtype=dtype, is_bias=False
    )
    tmp = helper.create_variable_for_type_inference(dtype)
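    # Normalize padding_idx (comment added for clarity): None disables padding
    # (encoded as -1), and a negative value wraps around the vocabulary size,
    # matching the behavior described in the docstring above.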
    padding_idx = (
        -1
        if padding_idx is None
        else padding_idx
        if padding_idx >= 0
        else (size[0] + padding_idx)
    )
    helper.append_op(
        type='lookup_table',
        inputs={'Ids': input, 'W': w},
        outputs={'Out': tmp},
        attrs={
            'is_sparse': is_sparse,
            'is_distributed': is_distributed,
            'remote_prefetch': remote_prefetch,
            'padding_idx': padding_idx,
        },
    )
    return tmp


def _pull_sparse(
    input,
    size,
    table_id,
    accessor_class,
    name="embedding",
    ctr_label_name="",
    padding_id=0,
    dtype='float32',
    scale_sparse_grad=True,
):
    r"""
    **Pull Fleet Sparse Layer**

    This layer is used to look up embeddings of IDs, provided by :attr:`input`, in
    a Fleet lookup table. The result of this lookup is the embedding of each ID in
    :attr:`input`.

    Args:
        input(Variable|list of Variable): Input is a Tensor<int64> Variable, which
            contains the IDs information.
        size(int): The embedding size parameter, which indicates the size of
            each embedding vector respectively.
        table_id(int): the fleet table id of this embedding.
        accessor_class(str): the pslib accessor of the table, default is DownpourCtrAccessor.
        ctr_label_name(str): the layer name of click.
        padding_id(int): the padding id during lookup, default is 0.
        dtype(str): The dtype refers to the data type of output tensor. Only supports
            float32 now.
        scale_sparse_grad(bool): whether to scale sparse gradient with batch size. default
            is True.

    Returns:
        Variable|list of Variable: The tensor variable storing the embeddings of the \
                  supplied inputs.

    Examples:
        .. code-block:: python

          import paddle.fluid as fluid
          data = paddle.static.data(name='sequence', shape=[-1, 1], dtype='int64', lod_level=1)
          emb = fluid.layers.nn._pull_sparse(
              input=data, size=11, table_id=0, accessor_class="DownpourCtrAccessor")
    """
    helper = LayerHelper(name, **locals())
    inputs = helper.multiple_input()
    outs = [helper.create_variable_for_type_inference(dtype)]
    input_names = [i.name for i in inputs]
    attrs = {
        'EmbeddingDim': size,
        'TableId': table_id,
        'AccessorClass': accessor_class,
        'CtrLabelName': ctr_label_name,
        'PaddingId': padding_id,
        'ScaleSparseGrad': scale_sparse_grad,
        'InputNames': input_names,
        # this is only for compatibility with the embedding op
        'is_distributed': True,
    }
    # this is only for compatibility with the embedding op
    w, _ = helper.create_or_get_global_variable(
        name=name, shape=[size], dtype=dtype, is_bias=False, persistable=True
    )
    helper.append_op(
        type='pull_sparse',
        inputs={'Ids': inputs, 'W': w},
        outputs={'Out': outs},
        attrs=attrs,
    )
    if len(outs) == 1:
        return outs[0]
    return outs


def _pull_sparse_v2(
    input,
    size,
    table_id,
    accessor_class,
    name="embedding",
    ctr_label_name="",
    padding_id=0,
    dtype='float32',
    scale_sparse_grad=True,
):
    r"""
    **Pull Fleet Sparse Layer**

    This layer is used to look up embeddings of IDs, provided by :attr:`input`, in
    a Fleet lookup table. The result of this lookup is the embedding of each ID in
    :attr:`input`.

    Args:
        input(Variable|list of Variable): Input is a Tensor<int64> Variable, which
            contains the IDs information.
        size(int): The embedding size parameter, which indicates the size of
            each embedding vector respectively.
        table_id(int): the pslib table id of this embedding.
        accessor_class(str): the fleet accessor of the table, default is DownpourCtrAccessor.
        ctr_label_name(str): the layer name of click.
        padding_id(int): the padding id during lookup, default is 0.
        dtype(str): The dtype refers to the data type of output tensor. Only supports
            float32 now.
        scale_sparse_grad(bool): whether to scale sparse gradient with batch size. default
            is True.

    Returns:
        Variable|list of Variable: The tensor variable storing the embeddings of the \
                  supplied inputs.

    Examples:
        .. code-block:: python

          import paddle.fluid as fluid
          data = paddle.static.data(name='sequence', shape=[-1, 1], dtype='int64', lod_level=1)
          emb = fluid.layers.nn._pull_sparse_v2(
              input=data, size=11, table_id=0, accessor_class="DownpourCtrAccessor")
    """
    helper = LayerHelper(name, **locals())
    inputs = helper.multiple_input()
    outs = [helper.create_variable_for_type_inference(dtype)]
    input_names = [i.name for i in inputs]
    attrs = {
        'EmbeddingDim': size,
        'TableId': table_id,
        'AccessorClass': accessor_class,
        'CtrLabelName': ctr_label_name,
        'PaddingId': padding_id,
        'ScaleSparseGrad': scale_sparse_grad,
        'InputNames': input_names,
        # this is only for compatibility with the embedding op
        'is_distributed': True,
    }
    # this is only for compatibility with the embedding op
    w, _ = helper.create_or_get_global_variable(
        name=name, shape=[size], dtype=dtype, is_bias=False, persistable=True
    )
    helper.append_op(
        type='pull_sparse_v2',
        inputs={'Ids': inputs, 'W': w},
        outputs={'Out': outs},
        attrs=attrs,
    )
    if len(outs) == 1:
        return outs[0]
    return outs


def _pull_gpups_sparse(
    input, size, dtype='float32', is_distributed=False, is_sparse=False
):
    r"""
    **Pull GpuPS Sparse Layer**

    This layer is used to look up embeddings of IDs, provided by :attr:`input`, in
    a GpuPS lookup table. The result of this lookup is the embedding of each ID in
    :attr:`input`.

    Args:
        input(Variable|list of Variable): Input is a Tensor<int64> Variable, which
            contains the IDs information.
        size(int|list of int): The embedding size parameter of each input, which indicates the size of
            each embedding vector respectively.
        dtype(str): The dtype refers to the data type of output tensor. Only supports
            float32 now.

    Returns:
        Variable|list of Variable: The tensor variable storing the embeddings of the \
                  supplied inputs, whose sizes are indicated by :attr:`size` respectively.

    Examples:
        .. code-block:: python

          import paddle.fluid as fluid
          slots = []
          data_1 = paddle.static.data(name='sequence', shape=[-1,1], dtype='int64', lod_level=1)
          slots.append(data_1)
          data_2 = paddle.static.data(name='sequence2', shape=[-1,1], dtype='int64', lod_level=1)
          slots.append(data_2)
          embs = fluid.layers.nn._pull_gpups_sparse(input=slots, size=[11, 35])
    """
    helper = LayerHelper('pull_gpups_sparse', **locals())
    if dtype != 'float32':
        raise ValueError(
            "GpuPS only support float type embedding now, and your type is: "
            + dtype
        )
    helper.input_dtype()
    inputs = helper.multiple_input()
    outs = [
        helper.create_variable_for_type_inference(dtype)
        for i in range(len(inputs))
    ]
    w = helper.create_parameter(
        attr=helper.param_attr, shape=[size[0]], dtype=dtype, is_bias=False
    )
    helper.append_op(
        type='pull_gpups_sparse',
        inputs={'Ids': inputs, 'W': w},
        outputs={'Out': outs},
        attrs={
            'size': size,
            'is_distributed': is_distributed,
            'is_sparse': is_sparse,
        },
    )
    if len(outs) == 1:
        return outs[0]
    return outs


def _pull_box_sparse(
    input, size, dtype='float32', is_distributed=False, is_sparse=False
):
    r"""
    **Pull Box Sparse Layer**

    This layer is used to look up embeddings of IDs, provided by :attr:`input`, in
    a BoxPS lookup table. The result of this lookup is the embedding of each ID in
    :attr:`input`.

    Args:
        input(Variable|list of Variable): Input is a Tensor<int64> Variable, which
            contains the IDs information.
        size(int): The embedding size parameter, which indicates the size of
            each embedding vector respectively.
        dtype(str): The dtype refers to the data type of output tensor. Only supports
            float32 now.

    Returns:
        Variable|list of Variable: The tensor variable storing the embeddings of the \
                  supplied inputs.

    Examples:
        .. code-block:: python

          import paddle.fluid as fluid
          data = paddle.static.data(name='sequence', shape=[-1,1], dtype='int64', lod_level=1)
          emb = fluid.layers.nn._pull_box_sparse(input=data, size=11)
    """
    helper = LayerHelper('pull_box_sparse', **locals())
    if dtype != 'float32':
        raise ValueError(
            "BoxPS only support float type embedding now, and your type is: "
            + dtype
        )
    helper.input_dtype()
    inputs = helper.multiple_input()
    outs = [
        helper.create_variable_for_type_inference(dtype)
        for i in range(len(inputs))
    ]
    w = helper.create_parameter(
        attr=helper.param_attr, shape=[size], dtype=dtype, is_bias=False
    )
    helper.append_op(
        type='pull_box_sparse',
        inputs={'Ids': inputs, 'W': w},
        outputs={'Out': outs},
        attrs={
            'size': size,
            'is_distributed': is_distributed,
            'is_sparse': is_sparse,
        },
    )
    if len(outs) == 1:
        return outs[0]
    return outs


def reduce_sum(input, dim=None, keep_dim=False, name=None):
    """

    Computes the sum of tensor elements over the given dimension.

    Args:
        input (Variable): The input variable which is a Tensor, the data type is float32,
            float64, int32, int64.
        dim (list|int, optional): The dimensions along which the sum is performed. If
            :attr:`None`, sum all elements of :attr:`input` and return a
            Tensor variable with a single element, otherwise must be in the
            range :math:`[-rank(input), rank(input))`. If :math:`dim[i] < 0`,
            the dimension to reduce is :math:`rank + dim[i]`.
        keep_dim (bool, optional): Whether to reserve the reduced dimension in the
            output Tensor. The result tensor will have one fewer dimension
            than the :attr:`input` unless :attr:`keep_dim` is true, default
            value is False.
        name(str, optional): The default value is None.  Normally there is no need for
            user to set this property.  For more information, please refer to :ref:`api_guide_Name`

    Returns:
        Variable: Tensor, results of summation operation on the specified dim of input tensor,
        its data type is the same as the input Tensor's.

    Raises:
        TypeError: if out data type is different from the input data type.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import paddle
            paddle.enable_static()
            # x is a Tensor variable with following elements:
            #    [[0.2, 0.3, 0.5, 0.9]
            #     [0.1, 0.2, 0.6, 0.7]]
            # Each example is followed by the corresponding output tensor.
            x = fluid.data(name='x', shape=[2, 4], dtype='float32')
            fluid.layers.reduce_sum(x)  # [3.5]
            fluid.layers.reduce_sum(x, dim=0)  # [0.3, 0.5, 1.1, 1.6]
            fluid.layers.reduce_sum(x, dim=-1)  # [1.9, 1.6]
            fluid.layers.reduce_sum(x, dim=1, keep_dim=True)  # [[1.9], [1.6]]

            # y is a Tensor variable with shape [2, 2, 2] and elements as below:
            #      [[[1, 2], [3, 4]],
            #      [[5, 6], [7, 8]]]
            # Each example is followed by the corresponding output tensor.
            y = fluid.data(name='y', shape=[2, 2, 2], dtype='float32')
            fluid.layers.reduce_sum(y, dim=[1, 2]) # [10, 26]
            fluid.layers.reduce_sum(y, dim=[0, 1]) # [16, 20]

    """
    reduce_all, dim = _get_reduce_dim(dim, input)

    if in_dygraph_mode():
        return _C_ops.sum(input, dim, None, keep_dim)
    else:
        attrs = {'dim': dim, 'keep_dim': keep_dim, 'reduce_all': reduce_all}
        check_variable_and_dtype(
            input,
            'input',
            ['float16', 'float32', 'float64', 'int32', 'int64'],
            'reduce_sum',
        )
        helper = LayerHelper('reduce_sum', **locals())
        out = helper.create_variable_for_type_inference(
            dtype=helper.input_dtype()
        )
        helper.append_op(
            type='reduce_sum',
            inputs={'X': input},
            outputs={'Out': out},
            attrs=attrs,
        )
        return out
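
# A small dygraph sketch (added for illustration; values mirror the docstring
# examples above):
#
#     import paddle
#     x = paddle.to_tensor([[0.2, 0.3, 0.5, 0.9], [0.1, 0.2, 0.6, 0.7]])
#     reduce_sum(x)         # -> 3.5
#     reduce_sum(x, dim=0)  # -> [0.3, 0.5, 1.1, 1.6]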


def autoincreased_step_counter(counter_name=None, begin=1, step=1):
    """
    :api_attr: Static Graph

    Create an auto-increasing variable, which will be automatically increased
    by 1 in every iteration. By default, the first return value of this counter is 1,
    and the step size is 1.

    Args:
        counter_name(str, optional): The counter name. Default '@STEP_COUNTER@'.
        begin(int, optional): The first return value of this counter. Default 1.
        step(int, optional): The step size. Default 1.

    Returns:
        Variable: The auto-increased Variable with data type int64.

    Examples:
        .. code-block:: python

           import paddle.fluid as fluid
           import paddle
           paddle.enable_static()
           global_step = fluid.layers.autoincreased_step_counter(
               counter_name='@LR_DECAY_COUNTER@', begin=0, step=1)
    """
    helper = LayerHelper('global_step_counter')
    if counter_name is None:
        counter_name = '@STEP_COUNTER@'
    counter, is_new_var = helper.create_or_get_global_variable(
        name=counter_name,
        dtype='int64',
        shape=[1],
        persistable=True,
        belong_to_optimizer=True,
    )
    if is_new_var:
        helper.set_variable_initializer(
            counter,
            initializer=paddle.nn.initializer.ConstantInitializer(
                value=begin - 1, force_cpu=True
            ),
        )
        helper.main_program.global_block()._prepend_op(
            type='increment',
            inputs={'X': [counter]},
            outputs={'Out': [counter]},
            attrs={'step': float(step)},
        )
        counter.stop_gradient = True

    return counter
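
# Implementation note (added comment): the increment op is prepended to the
# global block rather than appended, and only when the counter variable is
# first created (is_new_var), so the counter is bumped at the start of every
# iteration before any other op runs.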


def unsqueeze(input, axes, name=None):
    """
    Insert single-dimensional entries to the shape of a Tensor. Takes one
    required argument axes, a list of dimensions that will be inserted.
    Dimension indices in axes are as seen in the output tensor.

    For example:

    .. code-block:: text

      Given a tensor with shape [3, 4, 5],
      the unsqueezed tensor with axes=[0, 4] has shape [1, 3, 4, 5, 1].

    Args:
        input (Variable): The input Tensor to be unsqueezed. Supported data type: float32, float64, bool, int8, int32, int64.
        axes (int|list|tuple|Variable): Indicates the dimensions to be inserted. The data type is ``int32`` . If ``axes`` is a list or tuple, each element should be an integer or a Tensor with shape [1]. If ``axes`` is a Variable, it should be a 1-D Tensor.
        name (str|None): Name for this layer.

    Returns:
        Variable: Unsqueezed Tensor, with the same data type as input.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            x = paddle.static.data(name='x', shape=[-1, 5, 10], dtype="float32")
            y = fluid.layers.unsqueeze(input=x, axes=[1])

    """
    if in_dygraph_mode():
        if isinstance(axes, int):
            axes = [axes]
        elif isinstance(axes, Variable):
            axes = axes.numpy().tolist()
        elif isinstance(axes, (list, tuple)):
            axes = [
                item.numpy().item(0) if isinstance(item, Variable) else item
                for item in axes
            ]
        return _C_ops.unsqueeze(input, axes)
    else:
        check_type(axes, 'axis/axes', (int, list, tuple, Variable), 'unsqueeze')
        check_variable_and_dtype(
            input,
            'input',
            [
                'float16',
                'float32',
                'float64',
                'bool',
                'int8',
                'int16',
                'int32',
                'int64',
                'complex64',
                'complex128',
            ],
            'unsqueeze',
        )
        helper = LayerHelper("unsqueeze2", **locals())
        inputs = {"X": input}
        attrs = {}

        if isinstance(axes, int):
            axes = [axes]
        if isinstance(axes, Variable):
            axes.stop_gradient = True
            inputs["AxesTensor"] = axes
        elif isinstance(axes, (list, tuple)):
            if utils._contain_var(axes):
                inputs["AxesTensorList"] = utils._convert_to_tensor_list(axes)
            else:
                attrs["axes"] = axes

        out = helper.create_variable_for_type_inference(dtype=input.dtype)
        x_shape = helper.create_variable_for_type_inference(dtype=input.dtype)
        helper.append_op(
            type="unsqueeze2",
            inputs=inputs,
            attrs=attrs,
            outputs={"Out": out, "XShape": x_shape},
        )

        return out


def _logical_op(op_name, x, y, out=None, name=None, binary_op=True):
    if in_dygraph_mode():
        op = getattr(_legacy_C_ops, op_name)
        if binary_op:
            return op(x, y)
        else:
            return op(x)
    else:
        check_variable_and_dtype(
            x,
            "x",
            ["bool", "int8", "int16", "int32", "int64", "float32", "float64"],
            op_name,
        )
        if y is not None:
            check_variable_and_dtype(
                y,
                "y",
                [
                    "bool",
                    "int8",
                    "int16",
                    "int32",
                    "int64",
                    "float32",
                    "float64",
                ],
                op_name,
            )
        if out is not None:
            check_type(out, "out", Variable, op_name)

        helper = LayerHelper(op_name, **locals())

        if binary_op and x.dtype != y.dtype:
            raise ValueError(
                "(InvalidArgument) The DataType of %s Op's Variable must be consistent, but received %s and %s."
                % (op_name, x.dtype, y.dtype)
            )

        if out is None:
            out = helper.create_variable_for_type_inference(dtype=x.dtype)

        if binary_op:
            helper.append_op(
                type=op_name, inputs={"X": x, "Y": y}, outputs={"Out": out}
            )
        else:
            helper.append_op(
                type=op_name, inputs={"X": x}, outputs={"Out": out}
            )

        return out
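
# A hedged usage sketch for _logical_op (illustrative, not from the original
# source); in static-graph mode a binary op such as logical_and is appended to
# the program with inputs X and Y:
#
#     a = paddle.static.data(name='a', shape=[2], dtype='bool')
#     b = paddle.static.data(name='b', shape=[2], dtype='bool')
#     res = _logical_op('logical_and', a, b, binary_op=True)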