Commit 06fd61f8, authored by J jinyuKing

update text.py

Parent f3e8f301
@@ -19,7 +19,6 @@ from __future__ import print_function
 import os
 import six
 import sys
 if six.PY2:
     reload(sys)
     sys.setdefaultencoding('utf8')
@@ -50,8 +49,8 @@ __all__ = [
     'BeamSearchDecoder', 'MultiHeadAttention', 'FFN',
     'TransformerEncoderLayer', 'TransformerEncoder', 'TransformerDecoderLayer',
     'TransformerDecoder', 'TransformerBeamSearchDecoder', 'Linear_chain_crf',
-    'Crf_decoding', 'SequenceTagging', 'GRUEncoderLayer', 'CNNEncoder',
-    'BOWEncoder', 'SimpleConvPoolLayer', 'GRUEncoder', 'DynamicGRU', 'LSTMEncoder'
+    'Crf_decoding', 'SequenceTagging', 'GRUEncoderLayer', 'SimCNNEncoder',
+    'SimBOWEncoder', 'SimpleConvPoolLayer', 'SimGRUEncoder', 'DynamicGRU', 'SimLSTMEncoder'
 ]
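Note: the user-visible change in this commit is the `Sim` prefix on the simnet encoder names exported from text.py. A hypothetical before/after for downstream imports (the module path here is assumed for illustration, not taken from the commit):

    # Before this commit (hypothetical import path):
    # from text import CNNEncoder, BOWEncoder, GRUEncoder, LSTMEncoder
    # After this commit:
    from text import SimCNNEncoder, SimBOWEncoder, SimGRUEncoder, SimLSTMEncoder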
@@ -89,12 +88,12 @@ class RNNCell(Layer):
         batch_ref = flatten(batch_ref)[0]

         def _is_shape_sequence(seq):
-            if sys.version_info < (3,):
+            if sys.version_info < (3, ):
                 integer_types = (
                     int,
-                    long,)
+                    long, )
             else:
-                integer_types = (int,)
+                integer_types = (int, )
             """For shape, list/tuple of integer is the finest-grained objection"""
             if (isinstance(seq, list) or isinstance(seq, tuple)):
                 if reduce(
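Note: this hunk only reflows whitespace. The surrounding helper tests whether a value is a shape, i.e. a list/tuple of integers, with a Python 2/3 compatible notion of "integer". A minimal self-contained sketch of that pattern (simplified to flat sequences, for illustration only):

    import sys
    from functools import reduce

    def _is_shape_sequence(seq):
        # Python 2 has a distinct `long` type; Python 3 folds it into `int`.
        if sys.version_info < (3, ):
            integer_types = (int, long, )  # noqa: F821 (Python 2 only)
        else:
            integer_types = (int, )
        # A flat shape is a list/tuple whose elements are all integers.
        if isinstance(seq, (list, tuple)):
            return reduce(lambda ok, x: ok and isinstance(x, integer_types),
                          seq, True)
        return False

    assert _is_shape_sequence([32, 128])
    assert not _is_shape_sequence([32, 'x'])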
@@ -1219,7 +1218,7 @@ class MultiHeadAttention(Layer):
         # scale dot product attention
         product = layers.matmul(
-            x=q, y=k, transpose_y=True, alpha=self.d_model ** -0.5)
+            x=q, y=k, transpose_y=True, alpha=self.d_model**-0.5)
         if attn_bias:
             product += attn_bias
         weights = layers.softmax(product)
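Note: only the spacing around `**` changes here; the code itself is the scale-and-softmax step of scaled dot-product attention. A NumPy sketch of what this hunk computes (illustrative only; note it scales by `d_model**-0.5` rather than the more common per-head key dimension):

    import numpy as np

    def scaled_dot_product_attention(q, k, v, d_model, attn_bias=None):
        # product = q . k^T / sqrt(d_model), mirroring
        # layers.matmul(x=q, y=k, transpose_y=True, alpha=d_model**-0.5)
        product = q @ np.swapaxes(k, -1, -2) * d_model**-0.5
        if attn_bias is not None:
            product = product + attn_bias  # e.g. large negatives at padding
        # softmax over the last axis, as in layers.softmax(product)
        e = np.exp(product - product.max(axis=-1, keepdims=True))
        weights = e / e.sum(axis=-1, keepdims=True)
        return weights @ v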
@@ -1309,6 +1308,7 @@ class TransformerEncoderLayer(Layer):
                  reused_ffn_weights={"reused_fc1": None,
                                      "reused_fc2": None},
                  reused_post_ffn_layernorm=None):
         super(TransformerEncoderLayer, self).__init__()
         self.preprocesser1 = PrePostProcessLayer(preprocess_cmd, d_model,
@@ -1556,7 +1556,7 @@ class TransformerDecoder(Layer):
         ]


-# TODO: we should merge GRUCell with BasicGRUCell
+#TODO: we should merge GRUCell with BasicGRUCell
 class GRUCell(RNNCell):
     def __init__(self,
                  input_size,
@@ -1590,7 +1590,7 @@ class GRUCell(RNNCell):
         return [self.hidden_size]


-# TODO: we should merge GRUCell with BasicGRUCell
+#TODO: we should merge GRUCell with BasicGRUCell
 class GRUEncoderCell(RNNCell):
     def __init__(self,
                  num_layers,
@@ -1606,7 +1606,7 @@ class GRUEncoderCell(RNNCell):
             self.gru_cells.append(
                 self.add_sublayer(
                     "gru_%d" % i,
-                    # BasicGRUCell(
+                    #BasicGRUCell(
                     GRUCell(
                         input_size=input_size if i == 0 else hidden_size,
                         hidden_size=hidden_size,
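Note: around the commented-out `BasicGRUCell` alternative, GRUEncoderCell stacks `num_layers` cells, and only the first layer consumes the raw input width; later layers consume the previous layer's hidden state. A toy illustration of that wiring (stand-in class and made-up sizes, not the real API):

    class ToyCell:
        """Stand-in for GRUCell/BasicGRUCell; records its dimensions only."""
        def __init__(self, input_size, hidden_size):
            self.input_size = input_size
            self.hidden_size = hidden_size

    num_layers, input_size, hidden_size = 3, 128, 256
    cells = [ToyCell(input_size if i == 0 else hidden_size, hidden_size)
             for i in range(num_layers)]
    assert [c.input_size for c in cells] == [128, 256, 256]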
@@ -1673,6 +1673,7 @@ class Linear_chain_crf(fluid.dygraph.Layer):
         self._transition = value

     def forward(self, input, label, length=None):
         alpha = self._helper.create_variable_for_type_inference(
             dtype=self._dtype)
         emission_exps = self._helper.create_variable_for_type_inference(
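Note: `alpha` is the forward-algorithm accumulator of a linear-chain CRF. A toy NumPy sketch of the recurrence it stands for (made-up scores; the actual Paddle op additionally handles batching and lengths):

    import numpy as np

    seq_len, num_tags = 3, 4
    emissions = np.random.rand(seq_len, num_tags)     # per-step tag scores
    transitions = np.random.rand(num_tags, num_tags)  # [i, j]: tag i -> tag j

    alpha = emissions[0]
    for t in range(1, seq_len):
        # alpha[j] = logsumexp_i(alpha[i] + transitions[i, j]) + emissions[t, j]
        scores = alpha[:, None] + transitions
        alpha = np.log(np.exp(scores).sum(axis=0)) + emissions[t]
    log_partition = np.log(np.exp(alpha).sum())  # normalizer over all paths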
@@ -1723,6 +1724,7 @@ class Crf_decoding(fluid.dygraph.Layer):
         self._transition = value

     def forward(self, input, label=None, length=None):
         viterbi_path = self._helper.create_variable_for_type_inference(
             dtype=self._dtype)
         this_inputs = {
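Note: `viterbi_path` is the result of Viterbi decoding over the same CRF scores: max instead of sum in the recurrence, plus backpointers. A matching toy sketch (illustrative only):

    import numpy as np

    seq_len, num_tags = 3, 4
    emissions = np.random.rand(seq_len, num_tags)
    transitions = np.random.rand(num_tags, num_tags)

    best = emissions[0]
    back = []
    for t in range(1, seq_len):
        scores = best[:, None] + transitions   # [i, j]
        back.append(scores.argmax(axis=0))     # best previous tag per tag j
        best = scores.max(axis=0) + emissions[t]
    # Trace the highest-scoring tag sequence backwards.
    path = [int(best.argmax())]
    for bp in reversed(back):
        path.append(int(bp[path[-1]]))
    viterbi_path = path[::-1]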
@@ -1919,7 +1921,7 @@ class SimpleConvPoolLayer(Layer):
         return x


-class CNNEncoder(Layer):
+class SimCNNEncoder(Layer):
     """
     simple CNNEncoder for simnet
     """
@@ -1933,7 +1935,7 @@ class CNNEncoder(Layer):
                  padding_idx,
                  act
                  ):
-        super(CNNEncoder, self).__init__()
+        super(SimCNNEncoder, self).__init__()
         self.dict_size = dict_size
         self.emb_dim = emb_dim
         self.filter_size = filter_size
@@ -1962,7 +1964,7 @@ class CNNEncoder(Layer):
         emb_out=self.cnn_layer(emb_reshape)
         return emb_out

-class BOWEncoder(Layer):
+class SimBOWEncoder(Layer):
     """
     simple BOWEncoder for simnet
     """
@@ -1973,7 +1975,7 @@ class BOWEncoder(Layer):
                  seq_len,
                  padding_idx
                  ):
-        super(BOWEncoder, self).__init__()
+        super(SimBOWEncoder, self).__init__()
         self.dict_size = dict_size
         self.bow_dim = bow_dim
         self.seq_len = seq_len
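Note: these hunks only rename the class; a bag-of-words encoder of this kind reduces to an embedding lookup followed by pooling over the sequence. A generic NumPy illustration (sum pooling assumed; the exact pooling op in SimBOWEncoder is not shown in this excerpt):

    import numpy as np

    dict_size, bow_dim = 100, 8
    emb_table = np.random.rand(dict_size, bow_dim)  # embedding matrix
    token_ids = np.array([4, 17, 42])               # one tokenized sentence
    bow_vector = emb_table[token_ids].sum(axis=0)   # pooled, shape (bow_dim,)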
@@ -2034,7 +2036,7 @@ class DynamicGRU(fluid.dygraph.Layer):
         res = fluid.layers.concat(res, axis=1)
         return res

-class GRUEncoder(Layer):
+class SimGRUEncoder(Layer):
     """
     simple GRUEncoder for simnet
     """
@@ -2046,7 +2048,7 @@ class GRUEncoder(Layer):
                  padding_idx,
                  seq_len
                  ):
-        super(GRUEncoder, self).__init__()
+        super(SimGRUEncoder, self).__init__()
         self.dict_size = dict_size
         self.emb_dim = emb_dim
         self.gru_dim = gru_dim
@@ -2071,7 +2073,7 @@ class GRUEncoder(Layer):
         gru = fluid.layers.tanh(gru)
         return gru

-class LSTMEncoder(Layer):
+class SimLSTMEncoder(Layer):
     """
     simple LSTMEncoder for simnet
     """
@@ -2087,7 +2089,7 @@ class LSTMEncoder(Layer):
         """
         initialize
         """
-        super(LSTMEncoder, self).__init__()
+        super(SimLSTMEncoder, self).__init__()
         self.dict_size = dict_size
         self.emb_dim = emb_dim
         self.lstm_dim = lstm_dim