From 744279fe685dd0b8b426a686d84ad449da02366e Mon Sep 17 00:00:00 2001
From: Kevin
Date: Mon, 12 Aug 2019 10:13:12 +0800
Subject: [PATCH] Refine embedding Api doc (#18820)

* fix overflow by int32 mul test=develop
* fix reference nullptr
* fix codestyle test=develop
* modify to point in ContextProjectFunctor test=develop
* modify to point in ContextProjectFunctor test=develop
* modify . to -> test=develop
* refine embedding padding_idx doc test=develop
* fix math:padding_idx preview bug test=develop
* modify API.spec test=develop
* fix spell error test=develop
* refine dtype parm desc test=develop
---
 paddle/fluid/API.spec            |  2 +-
 python/paddle/fluid/layers/nn.py | 16 +++++++++-------
 2 files changed, 10 insertions(+), 8 deletions(-)

diff --git a/paddle/fluid/API.spec b/paddle/fluid/API.spec
index 64f1b3b436c..3b73291ca52 100644
--- a/paddle/fluid/API.spec
+++ b/paddle/fluid/API.spec
@@ -95,7 +95,7 @@ paddle.fluid.initializer.NumpyArrayInitializer ('paddle.fluid.initializer.NumpyA
 paddle.fluid.initializer.NumpyArrayInitializer.__init__ (ArgSpec(args=['self', 'value'], varargs=None, keywords=None, defaults=None), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
 paddle.fluid.layers.fc (ArgSpec(args=['input', 'size', 'num_flatten_dims', 'param_attr', 'bias_attr', 'act', 'is_test', 'name'], varargs=None, keywords=None, defaults=(1, None, None, None, False, None)), ('document', '1c74f52549814235077ecc34856a95eb'))
 paddle.fluid.layers.center_loss (ArgSpec(args=['input', 'label', 'num_classes', 'alpha', 'param_attr', 'update_center'], varargs=None, keywords=None, defaults=(True,)), ('document', '7129819d94625c6104054e8187768589'))
-paddle.fluid.layers.embedding (ArgSpec(args=['input', 'size', 'is_sparse', 'is_distributed', 'padding_idx', 'param_attr', 'dtype'], varargs=None, keywords=None, defaults=(False, False, None, None, 'float32')), ('document', '1b4916f765620374ad0fdefe5a352993'))
+paddle.fluid.layers.embedding (ArgSpec(args=['input', 'size', 'is_sparse', 'is_distributed', 'padding_idx', 'param_attr', 'dtype'], varargs=None, keywords=None, defaults=(False, False, None, None, 'float32')), ('document', 'd8e405486a1e4e189b51d6ee28d67b1e'))
 paddle.fluid.layers.dynamic_lstm (ArgSpec(args=['input', 'size', 'h_0', 'c_0', 'param_attr', 'bias_attr', 'use_peepholes', 'is_reverse', 'gate_activation', 'cell_activation', 'candidate_activation', 'dtype', 'name'], varargs=None, keywords=None, defaults=(None, None, None, None, True, False, 'sigmoid', 'tanh', 'tanh', 'float32', None)), ('document', '6d3ee14da70adfa36d85c40b18716ef2'))
 paddle.fluid.layers.dynamic_lstmp (ArgSpec(args=['input', 'size', 'proj_size', 'param_attr', 'bias_attr', 'use_peepholes', 'is_reverse', 'gate_activation', 'cell_activation', 'candidate_activation', 'proj_activation', 'dtype', 'name', 'h_0', 'c_0', 'cell_clip', 'proj_clip'], varargs=None, keywords=None, defaults=(None, None, True, False, 'sigmoid', 'tanh', 'tanh', 'tanh', 'float32', None, None, None, None, None)), ('document', 'c37d51aad655c8a9f9b045c64717320a'))
 paddle.fluid.layers.dynamic_gru (ArgSpec(args=['input', 'size', 'param_attr', 'bias_attr', 'is_reverse', 'gate_activation', 'candidate_activation', 'h_0', 'origin_mode'], varargs=None, keywords=None, defaults=(None, None, False, 'sigmoid', 'tanh', None, False)), ('document', '83617c165827e030636c80486d5de6f3'))
diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py
index babf5fd64e7..b269b01ee78 100644
--- a/python/paddle/fluid/layers/nn.py
+++ b/python/paddle/fluid/layers/nn.py
@@ -463,18 +463,20 @@ def embedding(input,
 
     Args:
         input(Variable): Input is a Tensor Variable, which contains the IDs information.
+            The value of the input IDs should satisfy :math:`0 <= id < size[0]`.
         size(tuple|list): The shape of the look up table parameter. It should
            have two elements which indicate the size of the dictionary of
            embeddings and the size of each embedding vector respectively.
         is_sparse(bool): The flag indicating whether to use sparse update.
         is_distributed(bool): Whether to run lookup table from remote parameter server.
-        padding_idx(int|long|None): If :attr:`None`, it makes no effect to lookup.
-            Otherwise the given :attr:`padding_idx` indicates padding the output
-            with zeros whenever lookup encounters it in :attr:`input`. If
-            :math:`padding_idx < 0`, the :attr:`padding_idx` to use in lookup is
-            :math:`size[0] + dim`.
-        param_attr(ParamAttr): Parameters for this layer
-        dtype(np.dtype|core.VarDesc.VarType|str): The type of data : float32, float_16, int etc
+        padding_idx(int|long|None): If given, the lookup outputs an all-zero
+            vector whenever it encounters :math:`padding\_idx` in the input Ids.
+            If set to :attr:`None`, it has no effect on the output. If
+            :math:`padding\_idx < 0`, it is automatically converted to
+            :math:`size[0] + padding\_idx`. Default: None.
+        param_attr(ParamAttr): Parameters for this layer.
+        dtype(np.dtype|core.VarDesc.VarType|str): The data type of the output
+            tensor. It can be float32, float16, int, etc.
 
     Returns:
         Variable: The tensor variable storing the embeddings of the \
--
GitLab
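
Editor's note: below is a minimal usage sketch of the layer whose docstring this patch refines. It is not part of the patch; the dictionary size (128), embedding dimension (16), and the fed ids are illustrative assumptions, and it targets the Fluid 1.x static-graph API (fluid.layers.data, fluid.Executor) of the same codebase.

import numpy as np
import paddle.fluid as fluid

# Illustrative setup: a 128-row lookup table with 16-dimensional embeddings.
# padding_idx=0 means any id equal to 0 produces an all-zero output row.
ids = fluid.layers.data(name='ids', shape=[1], dtype='int64', lod_level=1)
emb = fluid.layers.embedding(
    input=ids,
    size=[128, 16],      # [dict_size, embedding_dim]; ids must satisfy 0 <= id < 128
    padding_idx=0,       # ids equal to 0 are treated as padding
    is_sparse=False,
    dtype='float32')

place = fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())

# Feed three ids; the first one is the padding id.
id_data = fluid.create_lod_tensor(
    np.array([[0], [3], [7]], dtype='int64'), [[3]], place)
out, = exe.run(fluid.default_main_program(),
               feed={'ids': id_data},
               fetch_list=[emb])
print(out[0])  # all zeros, because padding_idx=0

Per the conversion rule in the refined docstring, passing padding_idx=-128 with size=[128, 16] would resolve to index 0 as well (size[0] + padding_idx).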