Commit 3dfbef29 authored by JiabinYang

polish code and add comments for Embedding

Parent 53d558cd
@@ -22,7 +22,7 @@ from . import layers
 from ..framework import Variable, OpProtoHolder
 from ..param_attr import ParamAttr
 from ..initializer import Normal, Constant
-__all__ = ['Conv2D', 'Pool2D', 'FC', 'BatchNorm', 'EMBEDDING']
+__all__ = ['Conv2D', 'Pool2D', 'FC', 'BatchNorm', 'Embedding']
 class Conv2D(layers.Layer):
@@ -415,7 +415,44 @@ class BatchNorm(layers.Layer):
         return self._helper.append_activation(batch_norm_out)
-class EMBEDDING(layers.Layer):
+class Embedding(layers.Layer):
"""
**Embedding Layer**
This layer is used to lookup embeddings of IDs, provided by :attr:`input`, in
a lookup table. The result of this lookup is the embedding of each ID in the
:attr:`input`.
All the input variables are passed in as local variables to the LayerHelper
constructor.
Args:
size(tuple|list): The shape of the look up table parameter. It should
have two elements which indicate the size of the dictionary of
embeddings and the size of each embedding vector respectively.
is_sparse(bool): The flag indicating whether to use sparse update.
is_distributed(bool): Whether to run lookup table from remote parameter server.
padding_idx(int|long|None): If :attr:`None`, it makes no effect to lookup.
Otherwise the given :attr:`padding_idx` indicates padding the output
with zeros whenever lookup encounters it in :attr:`input`. If
:math:`padding_idx < 0`, the :attr:`padding_idx` to use in lookup is
:math:`size[0] + dim`.
param_attr(ParamAttr): Parameters for this layer
dtype(np.dtype|core.VarDesc.VarType|str): The type of data : float32, float_16, int etc
Returns:
Variable: The tensor variable storing the embeddings of the \
supplied inputs.
Examples:
.. code-block:: python
dict_size = len(dataset.ids)
input = fluid.layers.data(name='ids', shape=[32, 32], dtype='float32')
embedding = fluid.imperative.Embedding(size=[dict_size, 16])
fc = embedding(input)
"""
     def __init__(self,
                  size,
                  is_sparse=False,
@@ -424,7 +461,7 @@ class EMBEDDING(layers.Layer):
                  param_attr=None,
                  dtype='float32'):
-        super(EMBEDDING, self).__init__()
+        super(Embedding, self).__init__()
         self._size = size
         self._is_sparse = is_sparse
         self._is_distributed = is_distributed
@@ -440,8 +477,6 @@ class EMBEDDING(layers.Layer):
         from ..layer_helper import LayerHelper
         self._helper = LayerHelper('embedding', param_attr=param_attr)
-    def _build_once(self, input):
         self._w = self._helper.create_parameter(
             attr=self._param_attr,
             shape=self._size,
...
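
Note: a minimal usage sketch, not part of this commit, of how the renamed Embedding layer can be exercised in imperative mode; the 20-word vocabulary and the 4x1 batch of IDs are illustrative values:

    import numpy as np
    import paddle.fluid as fluid
    from paddle.fluid.imperative.base import to_variable
    from paddle.fluid.imperative.nn import Embedding

    with fluid.imperative.guard():
        # a 20-word vocabulary, 16-dimensional embedding vectors
        embedding = Embedding(size=[20, 16], dtype='float32')
        # the lookup expects int64 IDs, one ID per row
        ids = to_variable(np.array([[1], [3], [5], [7]]).astype('int64'))
        emb = embedding(ids)  # one 16-d embedding vector per input ID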
@@ -16,7 +16,7 @@ from __future__ import print_function
 import unittest
 import paddle.fluid as fluid
-from paddle.fluid.imperative.nn import EMBEDDING
+from paddle.fluid.imperative.nn import Embedding
 import paddle.fluid.framework as framework
 from paddle.fluid.optimizer import SGDOptimizer
 from paddle.fluid.imperative.base import to_variable
@@ -143,7 +143,7 @@ class PtbModel(fluid.imperative.Layer):
             num_layers=num_layers,
             init_scale=init_scale,
             dropout=dropout)
-        self.embedding = EMBEDDING(
+        self.embedding = Embedding(
             size=[vocab_size, hidden_size],
             dtype='float32',
             is_sparse=False,
@@ -151,8 +151,6 @@ class PtbModel(fluid.imperative.Layer):
                 name='embedding_para',
                 initializer=fluid.initializer.UniformInitializer(
                     low=-init_scale, high=init_scale)))
-    def _build_once(self, input, label, init_hidden, init_cell):
         self.softmax_weight = fluid.layers.create_parameter(
             [self.hidden_size, self.vocab_size],
             dtype="float32",
@@ -166,6 +164,9 @@ class PtbModel(fluid.imperative.Layer):
             default_initializer=fluid.initializer.UniformInitializer(
                 low=-self.init_scale, high=self.init_scale))
+    def _build_once(self, input, label, init_hidden, init_cell):
+        pass
     def forward(self, input, label, init_hidden, init_cell):
         init_h = fluid.layers.reshape(
@@ -203,7 +204,7 @@ class PtbModel(fluid.imperative.Layer):
 class TestImperativePtbRnn(unittest.TestCase):
-    def test_mnist_cpu_float32(self):
+    def test_ptb_rnn_cpu_float32(self):
         seed = 90
         hidden_size = 10
         vocab_size = 1000
...
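
Note: the net effect of emptying the _build_once bodies is that parameters whose shapes are known at construction time are created eagerly in __init__. A minimal sketch of the resulting pattern; the SimpleModel class and its names are illustrative, not part of this commit:

    import paddle.fluid as fluid

    class SimpleModel(fluid.imperative.Layer):
        def __init__(self, hidden_size, vocab_size):
            super(SimpleModel, self).__init__()
            # the shape is known up front, so the parameter is created
            # here rather than deferred to the first forward pass
            self.softmax_weight = fluid.layers.create_parameter(
                [hidden_size, vocab_size], dtype='float32')

        def _build_once(self, input):
            # nothing left to defer; kept as an explicit no-op,
            # mirroring the change to PtbModel above
            pass

        def forward(self, input):
            return fluid.layers.matmul(input, self.softmax_weight)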