# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
All layers related to the neural network.
"""
import os
import inspect
import warnings

import numpy as np

import paddle
from ..layer_helper import LayerHelper
from paddle.fluid.framework import _in_legacy_dygraph
from ..initializer import Normal, Constant
from ..framework import (
    Variable,
    OpProtoHolder,
    _non_static_mode,
    dygraph_only,
    _dygraph_tracer,
    default_main_program,
    _varbase_creator,
    static_only,
    _global_flags,
    _in_legacy_dygraph,
    in_dygraph_mode,
)
from ..framework import _current_expected_place
from .. import dygraph_utils
from ..param_attr import ParamAttr
from .layer_function_generator import (
    autodoc,
    templatedoc,
    _generate_doc_string_,
)
from .tensor import concat, assign, fill_constant, zeros
from . import utils
from .. import unique_name
from functools import reduce
from .. import core
from ...utils import deprecated
from ..data_feeder import (
    convert_dtype,
    check_variable_and_dtype,
    check_type,
    check_dtype,
)
from paddle.utils import deprecated
from paddle import _C_ops, _legacy_C_ops
from collections.abc import Iterable


__all__ = [
    'fc',
    'embedding',
    'autoincreased_step_counter',
    'clip',
    'clip_by_norm',
    'merge_selected_rows',
    'get_tensor_from_selected_rows',
]


OP_NAMEMAPPING = {
    'elementwise_max': 'maximum',
    'elementwise_min': 'minimum',
    'elementwise_pow': 'elementwise_pow',
    'elementwise_floordiv': 'floor_divide',
    'elementwise_add': 'add',
    'elementwise_sub': 'subtract',
    'elementwise_mul': 'multiply',
    'elementwise_div': 'divide',
    'elementwise_mod': 'remainder',
}
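
# For example, in dygraph mode the legacy op name 'elementwise_add' above is
# dispatched to the new-style _C_ops.add kernel by _elementwise_op_in_dygraph
# below, while in-place variants (names ending in '_') keep their own name.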

def _get_reduce_dim(dim, input):
    """
    Internal function for reduce_sum, reduce_mean and reduce_prod.
    It computes the ``reduce_all`` attribute value based on the given dim.
    """
    if dim is not None and not isinstance(dim, list):
        if isinstance(dim, (tuple, range)):
            dim = list(dim)
        elif isinstance(dim, int):
            dim = [dim]
        else:
            raise TypeError(
                "The type of dim must be int, list, tuple or range, but received {}".format(
                    type(dim)
                )
            )
    if dim is None:
        dim = []
    if dim == [] or len(dim) == len(input.shape):
        reduce_all = True
    else:
        reduce_all = False

    return reduce_all, dim
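
# A minimal illustration of the helper above (assuming `x` is a static-graph
# variable with shape [2, 4]):
#   _get_reduce_dim(None, x)    -> (True,  [])      # reduce over all dims
#   _get_reduce_dim(0, x)       -> (False, [0])     # a single axis
#   _get_reduce_dim([0, 1], x)  -> (True,  [0, 1])  # the axes cover every dim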


@dygraph_only
def _elementwise_op_in_dygraph(
    x, y, axis=-1, act=None, use_mkldnn=False, op_name=None
):
    def is_inplace(op_name):
        return op_name[-1] == "_"

    if op_name not in OP_NAMEMAPPING.keys() or axis != -1:
        op = getattr(_legacy_C_ops, op_name)
        out = op(x, y, 'axis', axis, 'use_mkldnn', use_mkldnn)
    else:
        if in_dygraph_mode():
            op = getattr(
                _C_ops,
                OP_NAMEMAPPING[op_name] if not is_inplace(op_name) else op_name,
            )
            out = op(x, y)

        if _in_legacy_dygraph():
            op = getattr(_legacy_C_ops, op_name)
            out = op(x, y, 'axis', axis, 'use_mkldnn', use_mkldnn)
    return dygraph_utils._append_activation_in_dygraph(
        out, act, use_mkldnn=use_mkldnn
    )


def fc(
    input,
    size,
    num_flatten_dims=1,
    param_attr=None,
    bias_attr=None,
    act=None,
    name=None,
):
    r"""
    :api_attr: Static Graph

    **Fully Connected Layer**

    This operator creates a fully connected layer in the network. It can take
    a Tensor (or LoDTensor) or a list of Tensors (or LoDTensors) as its inputs
    (see Args in detail). It creates a variable called weight for each input
    Tensor, which represents a fully connected weight matrix from each input
    unit to each output unit. The fully connected layer multiplies each input
    Tensor with its corresponding weight to produce an output Tensor with
    shape :math:`[M, size]` , where M is the batch size. If a list of Tensors
    is given, the results of the multiple output Tensors with shape
    :math:`[M, size]` will be summed up. If :attr:`bias_attr` is not None,
    a bias variable will be created and added to the output. Finally, if
    :attr:`act` is not None, it will be applied to the output as well.

    When the input is a single Tensor (or LoDTensor):

    .. math::

        Out = Act({XW + b})

    When the input is a list of Tensors (or LoDTensors):

    .. math::

        Out = Act({\sum_{i=0}^{N-1}X_iW_i + b})

    In the above equation:

    * :math:`N`: Number of the inputs. N equals len(input) if input is a list of Variables.
    * :math:`X_i`: The i-th input Tensor.
    * :math:`W_i`: The i-th weight matrix corresponding to the i-th input Tensor.
    * :math:`b`: The bias parameter created by this layer (if needed).
    * :math:`Act`: The activation function.
    * :math:`Out`: The output Tensor.

    .. code-block:: text

        Case 1:
        Given a single Tensor data_1, and num_flatten_dims = 2:
            data_1.data = [[[0.1, 0.2],
                            [0.3, 0.4]]]
            data_1.shape = (1, 2, 2) # 1 is batch_size

            out = fluid.layers.fc(input=data_1, size=1, num_flatten_dims=2)

        Then output is:
            out.data = [[0.83234344], [0.34936576]]
            out.shape = (1, 2, 1)

        Case 2:
        Given a list of Tensor:
            data_1.data = [[[0.1, 0.2],
                           [0.3, 0.4]]]
            data_1.shape = (1, 2, 2) # 1 is batch_size

            data_2.data = [[[0.1, 0.2, 0.3]]]
            data_2.shape = (1, 1, 3)
            data_2.shape = (1, 1, 3)

            out = fluid.layers.fc(input=[data_1, data_2], size=2)

        Then:
            out.data = [[0.18669507, 0.1893476]]
            out.shape = (1, 2)

    Args:
        input (Variable|list of Variable): A Tensor (or LoDTensor) with shape :math:`[N_1, N_2,..., N_k]` or
            a list of Tensors (or LoDTensors). The dimension of the input Tensor is at least 2 and the data
            type should be float32 or float64.
        size(int): The number of output units in this layer, which also means the feature size of output
            Tensor(or LoDTensor).
        num_flatten_dims (int): The fc layer can accept an input Tensor with more than
            two dimensions. If this happens, the multidimensional tensor will first be flattened
            into a 2-D matrix. The parameter :attr:`num_flatten_dims` determines how the input
            Tensor is flattened: the first :attr:`num_flatten_dims` (inclusive, index starts from 1)
            dimensions will be flattened to form the first dimension of the final matrix (height of
            the matrix), and the rest :math:`rank(X) - num\_flatten\_dims` dimensions are flattened to
            form the second dimension of the final matrix (width of the matrix). For example, assuming that
            X is a 5-dimensional Tensor with a shape [2, 3, 4, 5, 6], and :attr:`num_flatten_dims` = 3.
            Then, the flattened matrix will have a shape [2 x 3 x 4, 5 x 6] = [24, 30]. Default: 1.
        param_attr (ParamAttr): To specify the weight parameter property. Default: None, which means the
            default weight parameter property is used. See usage for details in :ref:`api_fluid_ParamAttr` .
        bias_attr (ParamAttr): To specify the bias parameter property. Default: None, which means the
            default bias parameter property is used. See usage for details in :ref:`api_fluid_ParamAttr` .
        act (str): Activation to be applied to the output of this layer, such as tanh, softmax,
            sigmoid, relu. For more information, please refer to :ref:`api_guide_activations_en` . Default: None.
        name (str, optional): The default value is None.  Normally there is no need for user to set this property.
            For more information, please refer to :ref:`api_guide_Name` .

    Returns:
        Variable: Tensor or LoDTensor calculated by fc layer. The data type is the same as the input.

    Raises:
        ValueError: If the dimension of the input Tensor is less than 2.

    Examples:
        .. code-block:: python

          import paddle.fluid as fluid
          import paddle
          paddle.enable_static()
          # when input is a single tensor
          data = fluid.data(name="data", shape=[-1, 32], dtype="float32")
          fc = fluid.layers.fc(input=data, size=1000, act="tanh")

          # when input is a list of tensors
          data_1 = fluid.data(name="data_1", shape=[-1, 32], dtype="float32")
          data_2 = fluid.data(name="data_2", shape=[-1, 36], dtype="float32")
          fc = fluid.layers.fc(input=[data_1, data_2], size=1000, act="tanh")
    """
    helper = LayerHelper("fc", **locals())
    check_type(input, 'input', (list, tuple, Variable), 'fc')
    if isinstance(input, (list, tuple)):
        for i, input_x in enumerate(input):
            check_type(input_x, 'input[' + str(i) + ']', Variable, 'fc')
    dtype = helper.input_dtype()
    check_dtype(
        dtype, 'input', ['float16', 'uint16', 'float32', 'float64'], 'fc'
    )
    mul_results = []
    for input_var, param_attr in helper.iter_inputs_and_params():
        input_shape = input_var.shape
        if num_flatten_dims == -1:
            num_flatten_dims = len(input_shape) - 1
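        # The weight shape is [prod(input_shape[num_flatten_dims:]), size],
        # i.e. the flattened width of this input times the output unit count.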
        param_shape = [
            reduce(lambda a, b: a * b, input_shape[num_flatten_dims:], 1)
        ] + [size]

        w = helper.create_parameter(
            attr=param_attr, shape=param_shape, dtype=dtype, is_bias=False
        )
        tmp = helper.create_variable_for_type_inference(dtype)
        helper.append_op(
            type="mul",
            inputs={"X": input_var, "Y": w},
            outputs={"Out": tmp},
            attrs={"x_num_col_dims": num_flatten_dims, "y_num_col_dims": 1},
        )
        mul_results.append(tmp)

    if len(mul_results) == 1:
        pre_bias = mul_results[0]
    else:
        pre_bias = helper.create_variable_for_type_inference(dtype)
        helper.append_op(
            type="sum",
            inputs={"X": mul_results},
            outputs={"Out": pre_bias},
            attrs={"use_mkldnn": False},
        )
    # add bias
    pre_activation = helper.append_bias_op(pre_bias, dim_start=num_flatten_dims)
    # add activation
    return helper.append_activation(pre_activation)


@deprecated(since="2.0.0", update_to="paddle.nn.functional.embedding")
def embedding(
    input,
    size,
    is_sparse=False,
    is_distributed=False,
    padding_idx=None,
    param_attr=None,
    dtype='float32',
):
    r"""
    :api_attr: Static Graph

    **WARNING:** This OP will be deprecated in a future release. This OP requires
    that the last dimension of the input Tensor shape be 1. It is recommended to
    use :ref:`api_fluid_embedding` instead.

    The operator is used to look up the embedding vectors of the ids provided
    by :attr:`input` . It automatically constructs a 2D embedding matrix based
    on the input :attr:`size` (vocab_size, emb_size) and :attr:`dtype` .

    This OP requires that the last dimension of the input Tensor shape be 1. The
    shape of the output Tensor is generated by replacing the last dimension of
    the input Tensor shape with emb_size.

    **Note:** The id in :attr:`input` must satisfy :math:`0 <= id < size[0]` ,
    otherwise the program will throw an exception and exit.

    .. code-block:: text

        Case 1:

        input is a Tensor. padding_idx = -1
            input.data = [[[1], [3]], [[2], [4]], [[4], [127]]]
            input.shape = [3, 2, 1]
        Given size = [128, 16]
        output is a Tensor:
            out.shape = [3, 2, 16]
            out.data = [[[0.129435295, 0.244512452, ..., 0.436322452],
                        [0.345421456, 0.524563927, ..., 0.144534654]],

                        [[0.345249859, 0.124939536, ..., 0.194353745],
                        [0.945345345, 0.435394634, ..., 0.435345365]],
                        [[0.945345345, 0.435394634, ..., 0.435345365],
                        [0.0,         0.0,         ..., 0.0        ]]]  # padding data
        Since the input padding_idx is less than 0, it is automatically converted to padding_idx = -1 + 128 = 127
        It will pad all-zero data when ids is 127.

        Case 2:

        input is a LoDTensor with 1-level LoD. padding_idx = 0
            input.lod = [[2, 3]]
            input.data = [[1], [3], [2], [4], [0]]
            input.shape = [5, 1]
        Given size = [128, 16]
        output is a LoDTensor:
            out.lod = [[2, 3]]
            out.shape = [5, 16]
            out.data = [[0.129435295, 0.244512452, ..., 0.436322452],
                        [0.345421456, 0.524563927, ..., 0.144534654],
                        [0.345249859, 0.124939536, ..., 0.194353745],
                        [0.945345345, 0.435394634, ..., 0.435345365],
                        [0.0,         0.0,         ..., 0.0        ]]  # padding data
        It will pad all-zero data when ids is 0.

    Args:
        input(Variable): A Tensor or LoDTensor with type int64, which contains the id information.
            The last dimension of Tensor shape must be equal to 1. The value of the input id should
            satisfy :math:`0 <= id < size[0]` .
        size(tuple|list): The shape of lookup table parameter. It should have two elements which
            indicates the size of the dictionary of embeddings and the size of each embedding vector respectively.
        is_sparse(bool): The flag indicating whether to use sparse update. This parameter only
            affects the performance of the backwards gradient update. It is recommended to set
            True because sparse update is faster. But some optimizers do not support sparse update,
            such as :ref:`api_fluid_optimizer_AdadeltaOptimizer` , :ref:`api_fluid_optimizer_AdamaxOptimizer` ,
            :ref:`api_fluid_optimizer_DecayedAdagradOptimizer` , :ref:`api_fluid_optimizer_FtrlOptimizer` ,
            :ref:`api_fluid_optimizer_LambOptimizer` and :ref:`api_fluid_optimizer_LarsMomentumOptimizer` .
            In these cases, is_sparse must be False. Default: False.
        is_distributed(bool): Whether to store the embedding matrix in a distributed manner. Only used
            in multi-machine distributed CPU training. Default: False.
        padding_idx(int|long|None): padding_idx needs to be in the interval [-vocab_size, vocab_size).
            If :math:`padding\_idx < 0`, the :math:`padding\_idx` will automatically be converted
            to :math:`vocab\_size + padding\_idx` . It will output all-zero padding data whenever lookup
            encounters :math:`padding\_idx` in id. And the padding data will not be updated while training.
            If set to None, it has no effect on the output. Default: None.
        param_attr(ParamAttr): To specify the weight parameter property. Default: None, which means the
            default weight parameter property is used. See usage for details in :ref:`api_fluid_ParamAttr` . In addition,
            user-defined or pre-trained word vectors can be loaded with the :attr:`param_attr` parameter.
            The local word vector needs to be transformed into numpy format, and the shape of local word
            vector should be consistent with :attr:`size` . Then :ref:`api_fluid_initializer_NumpyArrayInitializer`
            is used to load custom or pre-trained word vectors. See code example 2 for details.
        dtype(str|core.VarDesc.VarType): It refers to the data type of output Tensor.
            It must be float32 or float64. Default: float32.

    Returns:
        Variable: Embedding Tensor or LoDTensor mapped by input. The data type is the same as :attr:`dtype` .
    Examples:
        .. code-block:: python
          import paddle.fluid as fluid
          import numpy as np
          import paddle
          paddle.enable_static()

          data = fluid.data(name='x', shape=[None, 1], dtype='int64')

          # example 1
          emb_1 = fluid.embedding(input=data, size=[128, 64])

          # example 2: load custom or pre-trained word vectors
          weight_data = np.random.random(size=(128, 100))  # word vectors with numpy format
          w_param_attrs = fluid.ParamAttr(
              name="emb_weight",
              learning_rate=0.5,
              initializer=fluid.initializer.NumpyArrayInitializer(weight_data),
              trainable=True)
          emb_2 = fluid.layers.embedding(input=data, size=(128, 100), param_attr=w_param_attrs, dtype='float32')
    """

    helper = LayerHelper('embedding', **locals())
    check_variable_and_dtype(
        input, 'input', ['int64'], 'fluid.layers.embedding'
    )
    check_dtype(
        dtype,
        'dtype',
        ['uint16', 'float16', 'float32', 'float64'],
        'fluid.layers.embedding',
    )

    if is_distributed:
        is_distributed = False
        warnings.warn(
            "is_distributed is go out of use, `fluid.contrib.layers.sparse_embedding` is your needed"
        )

    remote_prefetch = True if is_sparse else False

    w = helper.create_parameter(
        attr=helper.param_attr, shape=size, dtype=dtype, is_bias=False
    )
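    # `w` is the [vocab_size, emb_size] lookup table described by `size`.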
    tmp = helper.create_variable_for_type_inference(dtype)
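    # Normalize padding_idx: None becomes -1 (no padding), and a negative
    # index is converted to size[0] + padding_idx (e.g. padding_idx=-1 with
    # size=[128, 16] becomes 127, as in Case 1 of the docstring above).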
    padding_idx = (
        -1
        if padding_idx is None
        else padding_idx
        if padding_idx >= 0
        else (size[0] + padding_idx)
    )
    helper.append_op(
        type='lookup_table',
        inputs={'Ids': input, 'W': w},
        outputs={'Out': tmp},
        attrs={
            'is_sparse': is_sparse,
            'is_distributed': is_distributed,
            'remote_prefetch': remote_prefetch,
            'padding_idx': padding_idx,
        },
    )
    return tmp


def _pull_sparse(
    input,
    size,
    table_id,
    accessor_class,
    name="embedding",
    ctr_label_name="",
    padding_id=0,
    dtype='float32',
    scale_sparse_grad=True,
):
    r"""
    **Pull Fleet Sparse Layer**

    This layer is used to look up embeddings of IDs, provided by :attr:`input`, in
    Fleet lookup table. The result of this lookup is the embedding of each ID in the
    :attr:`input`.

    Args:
        input(Variable|list of Variable): Input is a Tensor<int64> Variable, which
            contains the IDs information.
        size(int): The embedding size parameter, which indicates the size of
            each embedding vector respectively.
        table_id(int): the fleet table id of this embedding.
        accessor_class(str): the pslib accessor of the table, default is DownpourCtrAccessor.
        ctr_label_name(str): the layer name of click.
        padding_id(int): the padding id during lookup, default is 0.
        dtype(str): The dtype refers to the data type of output tensor. Only supports
            float32 now.
        scale_sparse_grad(bool): whether to scale sparse gradient with batch size. default
            is True.

    Returns:
        Variable|list of Variable: The tensor variable storing the embeddings of the \
                  supplied inputs.

    Examples:
        .. code-block:: python

          import paddle.fluid as fluid
          data = fluid.layers.data(name='sequence', shape=[1], dtype='int64', lod_level=1)
          emb = fluid.layers.nn._pull_sparse(
              input=data, size=11, table_id=0, accessor_class="DownpourCtrAccessor")
    """
    helper = LayerHelper(name, **locals())
    inputs = helper.multiple_input()
    outs = [helper.create_variable_for_type_inference(dtype)]
    input_names = [i.name for i in inputs]
    attrs = {
        'EmbeddingDim': size,
        'TableId': table_id,
        'AccessorClass': accessor_class,
        'CtrLabelName': ctr_label_name,
        'PaddingId': padding_id,
        'ScaleSparseGrad': scale_sparse_grad,
        'InputNames': input_names,
        # this is only for compatibility with the embedding op
        'is_distributed': True,
    }
    # this is only for compatibility with the embedding op
    w, _ = helper.create_or_get_global_variable(
        name=name, shape=[size], dtype=dtype, is_bias=False, persistable=True
    )
    helper.append_op(
        type='pull_sparse',
        inputs={'Ids': inputs, 'W': w},
        outputs={'Out': outs},
        attrs=attrs,
    )
    if len(outs) == 1:
        return outs[0]
    return outs


def _pull_sparse_v2(
    input,
    size,
    table_id,
    accessor_class,
    name="embedding",
    ctr_label_name="",
    padding_id=0,
    dtype='float32',
    scale_sparse_grad=True,
):
    r"""
    **Pull Fleet Sparse Layer**

    This layer is used to look up embeddings of IDs, provided by :attr:`input`, in
    Fleet lookup table. The result of this lookup is the embedding of each ID in the
    :attr:`input`.

    Args:
        input(Variable|list of Variable): Input is a Tensor<int64> Variable, which
            contains the IDs information.
        size(int): The embedding size parameter, which indicates the size of
            each embedding vector respectively.
        table_id(int): the pslib table id of this embedding.
        accessor_class(str): the fleet accessor of the table, default is DownpourCtrAccessor.
        ctr_label_name(str): the layer name of click.
        padding_id(int): the padding id during lookup, default is 0.
        dtype(str): The dtype refers to the data type of output tensor. Only supports
            float32 now.
        scale_sparse_grad(bool): whether to scale sparse gradient with batch size. default
            is True.

    Returns:
        Variable|list of Variable: The tensor variable storing the embeddings of the \
                  supplied inputs.

    Examples:
        .. code-block:: python

          import paddle.fluid as fluid
          data = fluid.layers.data(name='sequence', shape=[1], dtype='int64', lod_level=1)
          emb = fluid.layers.nn._pull_sparse_v2(
              input=data, size=11, table_id=0, accessor_class="DownpourCtrAccessor")
    """
    helper = LayerHelper(name, **locals())
    inputs = helper.multiple_input()
    outs = [helper.create_variable_for_type_inference(dtype)]
    input_names = [i.name for i in inputs]
    attrs = {
        'EmbeddingDim': size,
        'TableId': table_id,
        'AccessorClass': accessor_class,
        'CtrLabelName': ctr_label_name,
        'PaddingId': padding_id,
        'ScaleSparseGrad': scale_sparse_grad,
        'InputNames': input_names,
        # this is only for compatibility with the embedding op
        'is_distributed': True,
    }
    # this is only for compatibility with the embedding op
    w, _ = helper.create_or_get_global_variable(
        name=name, shape=[size], dtype=dtype, is_bias=False, persistable=True
    )
    helper.append_op(
        type='pull_sparse_v2',
        inputs={'Ids': inputs, 'W': w},
        outputs={'Out': outs},
        attrs=attrs,
    )
    if len(outs) == 1:
        return outs[0]
    return outs


def _pull_gpups_sparse(
    input, size, dtype='float32', is_distributed=False, is_sparse=False
):
    r"""
    **Pull GpuPS Sparse Layer**

    This layer is used to look up embeddings of IDs, provided by :attr:`input`, in
    GpuPS lookup table. The result of this lookup is the embedding of each ID in the
    :attr:`input`.

    Args:
        input(Variable|list of Variable): Input is a Tensor<int64> Variable, which
            contains the IDs information.
        size(int|list of int): The embedding size parameter of each input, which indicates the size of
            each embedding vector respectively.
        dtype(str): The dtype refers to the data type of output tensor. Only supports
            float32 now.

    Returns:
        Variable|list of Variable: The tensor variable storing the embeddings of the \
                  supplied inputs, whose size are indicated by size respectively.

    Examples:
        .. code-block:: python

          import paddle.fluid as fluid
          slots = []
          data_1 = fluid.layers.data(name='sequence', shape=[1], dtype='int64', lod_level=1)
          slots.append(data_1)
          data_2 = fluid.layers.data(name='sequence', shape=[1], dtype='int64', lod_level=1)
          slots.append(data_2)
          embs = fluid.layers.pull_gpups_sparse(input=slots, size=[11, 35])
    """
    helper = LayerHelper('pull_gpups_sparse', **locals())
    if dtype != 'float32':
        raise ValueError(
            "GpuPS only support float type embedding now, and your type is: "
            + dtype
        )
    helper.input_dtype()
    inputs = helper.multiple_input()
    outs = [
        helper.create_variable_for_type_inference(dtype)
        for i in range(len(inputs))
    ]
    w = helper.create_parameter(
        attr=helper.param_attr, shape=[size[0]], dtype=dtype, is_bias=False
    )
    helper.append_op(
        type='pull_gpups_sparse',
        inputs={'Ids': inputs, 'W': w},
        outputs={'Out': outs},
        attrs={
            'size': size,
            'is_distributed': is_distributed,
            'is_sparse': is_sparse,
        },
    )
    if len(outs) == 1:
        return outs[0]
    return outs


def _pull_box_sparse(
    input, size, dtype='float32', is_distributed=False, is_sparse=False
):
    r"""
    **Pull Box Sparse Layer**

    This layer is used to look up embeddings of IDs, provided by :attr:`input`, in
    BoxPS lookup table. The result of this lookup is the embedding of each ID in the
    :attr:`input`.

    Args:
        input(Variable|list of Variable): Input is a Tensor<int64> Variable, which
            contains the IDs information.
        size(int): The embedding size parameter, which indicates the size of
            each embedding vector respectively.
        dtype(str): The dtype refers to the data type of output tensor. Only supports
            float32 now.

    Returns:
        Variable|list of Variable: The tensor variable storing the embeddings of the \
                  supplied inputs.

    Examples:
        .. code-block:: python

          import paddle.fluid as fluid
          data = fluid.layers.data(name='sequence', shape=[1], dtype='int64', lod_level=1)
          emb = fluid.layers.pull_box_sparse(input=data, size=[11])
    """
    helper = LayerHelper('pull_box_sparse', **locals())
    if dtype != 'float32':
        raise ValueError(
            "BoxPS only support float type embedding now, and your type is: "
            + dtype
        )
    helper.input_dtype()
    inputs = helper.multiple_input()
    outs = [
        helper.create_variable_for_type_inference(dtype)
        for i in range(len(inputs))
    ]
    w = helper.create_parameter(
        attr=helper.param_attr, shape=[size], dtype=dtype, is_bias=False
    )
    helper.append_op(
        type='pull_box_sparse',
        inputs={'Ids': inputs, 'W': w},
        outputs={'Out': outs},
        attrs={
            'size': size,
            'is_distributed': is_distributed,
            'is_sparse': is_sparse,
        },
    )
    if len(outs) == 1:
        return outs[0]
    return outs


def reduce_sum(input, dim=None, keep_dim=False, name=None):
    """
    Computes the sum of tensor elements over the given dimension.

    Args:
        input (Variable): The input variable which is a Tensor, the data type is float32,
            float64, int32, int64.
        dim (list|int, optional): The dimensions along which the sum is performed. If
            :attr:`None`, sum all elements of :attr:`input` and return a
            Tensor variable with a single element, otherwise must be in the
            range :math:`[-rank(input), rank(input))`. If :math:`dim[i] < 0`,
            the dimension to reduce is :math:`rank + dim[i]`.
        keep_dim (bool, optional): Whether to reserve the reduced dimension in the
            output Tensor. The result tensor will have one fewer dimension
            than the :attr:`input` unless :attr:`keep_dim` is true, default
            value is False.
        name(str, optional): The default value is None.  Normally there is no need for
            user to set this property.  For more information, please refer to :ref:`api_guide_Name`

    Returns:
        Variable: Tensor, results of the summation operation on the specified dim of the input tensor,
        its data type is the same as the input Tensor's.

    Raises:
        TypeError: if the out data type is different from the input data type.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import paddle
            paddle.enable_static()
            # x is a Tensor variable with the following elements:
            #    [[0.2, 0.3, 0.5, 0.9]
            #     [0.1, 0.2, 0.6, 0.7]]
            # Each example is followed by the corresponding output tensor.
            x = fluid.data(name='x', shape=[2, 4], dtype='float32')
            fluid.layers.reduce_sum(x)  # [3.5]
            fluid.layers.reduce_sum(x, dim=0)  # [0.3, 0.5, 1.1, 1.6]
            fluid.layers.reduce_sum(x, dim=-1)  # [1.9, 1.6]
            fluid.layers.reduce_sum(x, dim=1, keep_dim=True)  # [[1.9], [1.6]]
            # y is a Tensor variable with shape [2, 2, 2] and elements as below:
            #      [[[1, 2], [3, 4]],
            #      [[5, 6], [7, 8]]]
            # Each example is followed by the corresponding output tensor.
            y = fluid.data(name='y', shape=[2, 2, 2], dtype='float32')
            fluid.layers.reduce_sum(y, dim=[1, 2]) # [10, 26]
            fluid.layers.reduce_sum(y, dim=[0, 1]) # [16, 20]

    """
    reduce_all, dim = _get_reduce_dim(dim, input)

    if in_dygraph_mode():
        return _C_ops.sum(input, dim, None, keep_dim)
    elif _in_legacy_dygraph():
        return _legacy_C_ops.reduce_sum(
            input, 'dim', dim, 'keep_dim', keep_dim, 'reduce_all', reduce_all
        )
    attrs = {'dim': dim, 'keep_dim': keep_dim, 'reduce_all': reduce_all}
    check_variable_and_dtype(
        input,
        'input',
        ['float16', 'float32', 'float64', 'int32', 'int64'],
        'reduce_sum',
    )
    helper = LayerHelper('reduce_sum', **locals())
    out = helper.create_variable_for_type_inference(dtype=helper.input_dtype())
    helper.append_op(
        type='reduce_sum',
        inputs={'X': input},
        outputs={'Out': out},
        attrs=attrs,
    )
    return out


def autoincreased_step_counter(counter_name=None, begin=1, step=1):
    """
    :api_attr: Static Graph

    Create an auto-increasing variable, which will be automatically increased
    by 1 in every iteration. By default, the first return of this counter is 1,
    and the step size is 1.

    Args:
        counter_name(str, optional): The counter name. Default '@STEP_COUNTER@'.
        begin(int, optional): The first return value of this counter. Default 1.
        step(int, optional): The step size. Default 1.
    Returns:
        Variable: The auto-increased Variable with data type int64.

    Examples:
        .. code-block:: python

           import paddle.fluid as fluid
           import paddle
           paddle.enable_static()
           global_step = fluid.layers.autoincreased_step_counter(
               counter_name='@LR_DECAY_COUNTER@', begin=0, step=1)
    """
    helper = LayerHelper('global_step_counter')
    if counter_name is None:
        counter_name = '@STEP_COUNTER@'
    counter, is_new_var = helper.create_or_get_global_variable(
        name=counter_name,
        dtype='int64',
        shape=[1],
        persistable=True,
        belong_to_optimizer=True,
    )
    if is_new_var:
        helper.set_variable_initializer(
            counter, initializer=Constant(value=begin - 1, force_cpu=True)
        )
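        # Initialize the counter to begin - 1 and prepend an increment op, so
        # that the first value read in an iteration is exactly `begin`.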
        helper.main_program.global_block()._prepend_op(
            type='increment',
            inputs={'X': [counter]},
            outputs={'Out': [counter]},
            attrs={'step': float(step)},
        )
        counter.stop_gradient = True

    return counter


def unsqueeze(input, axes, name=None):
    """
    Insert single-dimensional entries to the shape of a Tensor. Takes one
    required argument axes, a list of dimensions that will be inserted.
    Dimension indices in axes are as seen in the output tensor.

    For example:

    .. code-block:: text

      Given a tensor with shape [3, 4, 5],
      then the unsqueezed tensor with axes=[0, 4] has shape [1, 3, 4, 5, 1].

    Args:
        input (Variable): The input Tensor to be unsqueezed. Supported data type: float32, float64, bool, int8, int32, int64.
        axes (int|list|tuple|Variable): Indicates the dimensions to be inserted. The data type is ``int32`` . If ``axes`` is a list or tuple, the elements of it should be integers or Tensors with shape [1]. If ``axes`` is a Variable, it should be a 1-D Tensor.
        name (str|None): Name for this layer.

    Returns:
        Variable: Unsqueezed Tensor, with the same data type as input.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            x = fluid.layers.data(name='x', shape=[5, 10])
            y = fluid.layers.unsqueeze(input=x, axes=[1])
    """
    if _non_static_mode():
        if isinstance(axes, int):
            axes = [axes]
        elif isinstance(axes, Variable):
            axes = axes.numpy().tolist()
        elif isinstance(axes, (list, tuple)):
            axes = [
                item.numpy().item(0) if isinstance(item, Variable) else item
                for item in axes
            ]
        if _in_legacy_dygraph():
            out, _ = _legacy_C_ops.unsqueeze2(input, 'axes', axes)
            return out
        return _C_ops.unsqueeze(input, axes)

    check_type(axes, 'axis/axes', (int, list, tuple, Variable), 'unsqueeze')
    check_variable_and_dtype(
        input,
        'input',
        [
            'float16',
            'float32',
            'float64',
            'bool',
            'int8',
            'int16',
            'int32',
            'int64',
            'complex64',
            'complex128',
        ],
        'unsqueeze',
    )
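    # `axes` may be a Python int, a Variable, or a list/tuple that mixes ints
    # and 1-element Variables; Variable axes are wired in below through the
    # AxesTensor / AxesTensorList inputs so they are resolved at runtime.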
    helper = LayerHelper("unsqueeze2", **locals())
    inputs = {"X": input}
    attrs = {}

    if isinstance(axes, int):
        axes = [axes]
    if isinstance(axes, Variable):
        axes.stop_gradient = True
        inputs["AxesTensor"] = axes
    elif isinstance(axes, (list, tuple)):
        if utils._contain_var(axes):
            inputs["AxesTensorList"] = utils._convert_to_tensor_list(axes)
        else:
            attrs["axes"] = axes

    out = helper.create_variable_for_type_inference(dtype=input.dtype)
    x_shape = helper.create_variable_for_type_inference(dtype=input.dtype)
    helper.append_op(
        type="unsqueeze2",
        inputs=inputs,
        attrs=attrs,
        outputs={"Out": out, "XShape": x_shape},
    )

    return out

def _logical_op(op_name, x, y, out=None, name=None, binary_op=True):
    if _non_static_mode():
        op = getattr(_legacy_C_ops, op_name)
        if binary_op:
            return op(x, y)
        else:
            return op(x)
    check_variable_and_dtype(
        x,
        "x",
        ["bool", "int8", "int16", "int32", "int64", "float32", "float64"],
        op_name,
    )
    if y is not None:
        check_variable_and_dtype(
            y,
            "y",
            ["bool", "int8", "int16", "int32", "int64", "float32", "float64"],
            op_name,
        )
    if out is not None:
        check_type(out, "out", Variable, op_name)

    helper = LayerHelper(op_name, **locals())

    if binary_op and x.dtype != y.dtype:
        raise ValueError(
            "(InvalidArgument) The DataType of %s Op's Variable must be consistent, but received %s and %s."
            % (op_name, x.dtype, y.dtype)
        )

    if out is None:
        out = helper.create_variable_for_type_inference(dtype=x.dtype)

    if binary_op:
        helper.append_op(
            type=op_name, inputs={"X": x, "Y": y}, outputs={"Out": out}
        )
    else:
        helper.append_op(type=op_name, inputs={"X": x}, outputs={"Out": out})

    return out


@templatedoc()
def clip(x, min, max, name=None):
    """
        :old_api: paddle.fluid.layers.clip

    ${comment}

    Args:
        x(${x_type}): ${x_comment}
        min(float): ${min_comment}
        max(float): ${max_comment}
        name(str, optional): The default value is None.
                             Normally there is no need for user to set this property.
                             For more information, please refer to :ref:`api_guide_Name`

    Returns:
        ${out_comment}

    Return Type:
        ${out_type}

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            input = fluid.data(
                name='data', shape=[1], dtype='float32')
            reward = fluid.layers.clip(x=input, min=-1.0, max=1.0)
    """

    helper = LayerHelper("clip", **locals())
    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'clip')

    if name is None:
        name = unique_name.generate_with_ignorable_key(
            ".".join([helper.name, 'tmp'])
        )

    out = helper.create_variable(
        type=x.type, name=name, dtype=x.dtype, persistable=False
    )

    helper.append_op(
        type="clip",
        inputs={"X": x},
        attrs={"min": min, "max": max},
        outputs={"Out": out},
    )

    return out


@templatedoc()
def clip_by_norm(x, max_norm, name=None):
    """
    ${comment}

    Args:
        x(${x_type}): ${x_comment}
        max_norm(${max_norm_type}): ${max_norm_comment}
        name(str, optional): For detailed information, please refer
            to :ref:`api_guide_Name`. Usually there is no need to set this
            property, and it is None by default.

    Returns:
        Tensor:

        out(${out_type}): ${out_comment}

    Examples:
        .. code-block:: python

            import paddle
            import paddle.fluid as fluid

            input = paddle.to_tensor([[2.0, 2.0], [2.0, 2.0]], dtype='float32')
            reward = fluid.layers.clip_by_norm(x=input, max_norm=1.0)
            # [[0.5, 0.5], [0.5, 0.5]]
    """

    if in_dygraph_mode():
        return _C_ops.clip_by_norm(x, max_norm)
    if _non_static_mode():
        return _legacy_C_ops.clip_by_norm(x, 'max_norm', max_norm)
    helper = LayerHelper("clip_by_norm", **locals())
    check_variable_and_dtype(x, 'X', ['float32', 'float16'], 'clip_by_norm')
    check_type(max_norm, 'max_norm', (float), 'clip_by_norm')

    if name is None:
        name = unique_name.generate_with_ignorable_key(
            ".".join([helper.name, 'tmp'])
        )
    out = helper.create_variable(
        type=x.type, name=name, dtype=x.dtype, persistable=False
    )

    helper.append_op(
        type="clip_by_norm",
        inputs={"X": x},
        attrs={"max_norm": max_norm},
        outputs={"Out": out},
    )

    return out


@templatedoc()
def merge_selected_rows(x, name=None):
    """
    ${comment}

    Args:
        x(${x_type}): ${x_comment}
        name(str|None): Name of the output.

    Returns:
        out(${out_type}): ${out_comment}

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            b = fluid.default_main_program().global_block()
            var = b.create_var(
                name="X", dtype="float32", persistable=True,
                type=fluid.core.VarDesc.VarType.SELECTED_ROWS)
            y = fluid.layers.merge_selected_rows(var)
    """
    if in_dygraph_mode():
        return _C_ops.merge_selected_rows(x)

    if _non_static_mode():
        return _legacy_C_ops.merge_selected_rows(x)

    helper = LayerHelper("merge_selected_rows", **locals())
    out = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(
        type="merge_selected_rows",
        inputs={"X": x},
        attrs={},
        outputs={"Out": out},
    )
    return out


@templatedoc()
def get_tensor_from_selected_rows(x, name=None):
    """
    This operator gets tensor data from input with SelectedRows type, and outputs a LoDTensor.

    .. code-block:: text

        input x is SelectedRows:
           x.rows = [0, 5, 5, 4, 19]
           x.height = 20
           x.value = [[1, 1] [2, 2] [2, 2] [3, 3] [6, 6]]

        Output is LoDTensor:
           out.shape = [5, 2]
           out.data = [[1, 1],
                       [2, 2],
                       [2, 2],
                       [3, 3],
                       [6, 6]]

    Args:
        x(SelectedRows): Input with SelectedRows type. The data type is float32, float64, int32 or int64.
        name(str, optional): The default value is None.  Normally there is no need for user to set this property.
            For more information, please refer to :ref:`api_guide_Name` .

    Returns:
        Variable: LoDTensor transformed from SelectedRows. The data type is the same as the input.

    Examples:
        .. code-block:: python
            import paddle.fluid as fluid
            b = fluid.default_main_program().global_block()
            input = b.create_var(name="X", dtype="float32", persistable=True, type=fluid.core.VarDesc.VarType.SELECTED_ROWS)
            out = fluid.layers.get_tensor_from_selected_rows(input)
    """

    check_type(x, 'x', Variable, 'get_tensor_from_selected_rows')
    if x.type != core.VarDesc.VarType.SELECTED_ROWS:
        raise TypeError(
            "The type of 'x' in get_tensor_from_selected_rows must be SELECTED_ROWS."
        )
    helper = LayerHelper('get_tensor_from_selected_rows', **locals())
    out = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(
        type='get_tensor_from_selected_rows',
        inputs={'X': x},
        outputs={'Out': out},
        attrs={},
    )
    return out