Commit 06fd61f8 authored by J jinyuKing

update text.py

Parent f3e8f301
@@ -19,7 +19,6 @@ from __future__ import print_function
 import os
 import six
 import sys
 if six.PY2:
     reload(sys)
     sys.setdefaultencoding('utf8')
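(For context: `sys.setdefaultencoding` exists only in Python 2, and only after `reload(sys)` restores the attribute that `site.py` deletes at startup; the `six.PY2` guard therefore skips this UTF-8 workaround entirely on Python 3.)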
@@ -50,8 +49,8 @@ __all__ = [
     'BeamSearchDecoder', 'MultiHeadAttention', 'FFN',
     'TransformerEncoderLayer', 'TransformerEncoder', 'TransformerDecoderLayer',
     'TransformerDecoder', 'TransformerBeamSearchDecoder', 'Linear_chain_crf',
-    'Crf_decoding', 'SequenceTagging', 'GRUEncoderLayer', 'CNNEncoder',
-    'BOWEncoder', 'SimpleConvPoolLayer', 'GRUEncoder', 'DynamicGRU', 'LSTMEncoder'
+    'Crf_decoding', 'SequenceTagging', 'GRUEncoderLayer', 'SimCNNEncoder',
+    'SimBOWEncoder', 'SimpleConvPoolLayer', 'SimGRUEncoder', 'DynamicGRU', 'SimLSTMEncoder'
 ]
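For context, this hunk renames the SimNet-related exports with a `Sim` prefix. A minimal sketch of the import change a downstream caller would need (the `hapi.text` module path is an assumption based on this repo's layout):

```python
# Before this commit (hypothetical caller code):
# from hapi.text import CNNEncoder, BOWEncoder, GRUEncoder, LSTMEncoder

# After this commit, the same layers are exported under Sim*-prefixed names:
from hapi.text import SimCNNEncoder, SimBOWEncoder, SimGRUEncoder, SimLSTMEncoder
```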
@@ -89,12 +88,12 @@ class RNNCell(Layer):
         batch_ref = flatten(batch_ref)[0]

         def _is_shape_sequence(seq):
-            if sys.version_info < (3,):
+            if sys.version_info < (3, ):
                 integer_types = (
                     int,
-                    long,)
+                    long, )
             else:
-                integer_types = (int,)
+                integer_types = (int, )
             """For shape, list/tuple of integer is the finest-grained objection"""
             if (isinstance(seq, list) or isinstance(seq, tuple)):
                 if reduce(
@@ -1219,7 +1218,7 @@ class MultiHeadAttention(Layer):
         # scale dot product attention
         product = layers.matmul(
-            x=q, y=k, transpose_y=True, alpha=self.d_model ** -0.5)
+            x=q, y=k, transpose_y=True, alpha=self.d_model**-0.5)
         if attn_bias:
             product += attn_bias
         weights = layers.softmax(product)
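The hunk above is the scaled dot-product attention core, softmax(QKᵀ · d_model^(-1/2) + bias); the change itself is only whitespace around the `**` operator. For reference, a minimal NumPy sketch of the same computation (names and shapes are illustrative, not this module's API):

```python
import numpy as np

def scaled_dot_product_attention(q, k, v, d_model, attn_bias=None):
    # q @ k^T scaled by 1/sqrt(d_model), mirroring
    # layers.matmul(x=q, y=k, transpose_y=True, alpha=self.d_model**-0.5)
    product = np.matmul(q, k.swapaxes(-1, -2)) * d_model ** -0.5
    if attn_bias is not None:
        product = product + attn_bias
    # numerically stable softmax over the key axis
    weights = np.exp(product - product.max(axis=-1, keepdims=True))
    weights = weights / weights.sum(axis=-1, keepdims=True)
    return np.matmul(weights, v)
```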
@@ -1309,6 +1308,7 @@ class TransformerEncoderLayer(Layer):
                  reused_ffn_weights={"reused_fc1": None,
                                      "reused_fc2": None},
                  reused_post_ffn_layernorm=None):
         super(TransformerEncoderLayer, self).__init__()

         self.preprocesser1 = PrePostProcessLayer(preprocess_cmd, d_model,
@@ -1556,7 +1556,7 @@ class TransformerDecoder(Layer):
         ]


-# TODO: we should merge GRUCell with BasicGRUCell
+#TODO: we should merge GRUCell with BasicGRUCell
 class GRUCell(RNNCell):
     def __init__(self,
                  input_size,
@@ -1590,7 +1590,7 @@ class GRUCell(RNNCell):
         return [self.hidden_size]


-# TODO: we should merge GRUCell with BasicGRUCell
+#TODO: we should merge GRUCell with BasicGRUCell
 class GRUEncoderCell(RNNCell):
     def __init__(self,
                  num_layers,
@@ -1606,7 +1606,7 @@ class GRUEncoderCell(RNNCell):
             self.gru_cells.append(
                 self.add_sublayer(
                     "gru_%d" % i,
-                    # BasicGRUCell(
+                    #BasicGRUCell(
                     GRUCell(
                         input_size=input_size if i == 0 else hidden_size,
                         hidden_size=hidden_size,
@@ -1673,6 +1673,7 @@ class Linear_chain_crf(fluid.dygraph.Layer):
         self._transition = value

     def forward(self, input, label, length=None):
         alpha = self._helper.create_variable_for_type_inference(
             dtype=self._dtype)
         emission_exps = self._helper.create_variable_for_type_inference(
@@ -1723,6 +1724,7 @@ class Crf_decoding(fluid.dygraph.Layer):
         self._transition = value

     def forward(self, input, label=None, length=None):
         viterbi_path = self._helper.create_variable_for_type_inference(
             dtype=self._dtype)
         this_inputs = {
@@ -1919,7 +1921,7 @@ class SimpleConvPoolLayer(Layer):
         return x


-class CNNEncoder(Layer):
+class SimCNNEncoder(Layer):
     """
     simple CNNEncoder for simnet
     """
@@ -1933,7 +1935,7 @@ class CNNEncoder(Layer):
                  padding_idx,
                  act
                  ):
-        super(CNNEncoder, self).__init__()
+        super(SimCNNEncoder, self).__init__()
         self.dict_size = dict_size
         self.emb_dim = emb_dim
         self.filter_size = filter_size
@@ -1962,7 +1964,7 @@ class CNNEncoder(Layer):
         emb_out=self.cnn_layer(emb_reshape)
         return emb_out


-class BOWEncoder(Layer):
+class SimBOWEncoder(Layer):
     """
     simple BOWEncoder for simnet
     """
@@ -1973,7 +1975,7 @@ class BOWEncoder(Layer):
                  seq_len,
                  padding_idx
                  ):
-        super(BOWEncoder, self).__init__()
+        super(SimBOWEncoder, self).__init__()
         self.dict_size = dict_size
         self.bow_dim = bow_dim
         self.seq_len = seq_len
@@ -2034,7 +2036,7 @@ class DynamicGRU(fluid.dygraph.Layer):
         res = fluid.layers.concat(res, axis=1)
         return res
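For context, `DynamicGRU.forward` accumulates one `[batch, 1, hidden]` slice per time step in `res`, and the `concat` above stitches them back into a `[batch, seq_len, hidden]` sequence tensor. A runnable shape-flow sketch (NumPy stand-in for the per-step cell, not the repo's GRU math):

```python
import numpy as np

batch, seq_len, hidden_size = 2, 5, 8
hidden = np.zeros((batch, hidden_size))
res = []
for step in range(seq_len):
    # stand-in for the per-step GRU unit update (the real cell also mixes
    # the step input into `hidden`); here we only show the shape flow
    hidden = np.tanh(hidden + 1.0)
    res.append(hidden.reshape(batch, 1, hidden_size))
out = np.concatenate(res, axis=1)  # [batch, seq_len, hidden_size]
assert out.shape == (batch, seq_len, hidden_size)
```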

-class GRUEncoder(Layer):
+class SimGRUEncoder(Layer):
     """
     simple GRUEncoder for simnet
     """
@@ -2046,7 +2048,7 @@ class GRUEncoder(Layer):
                  padding_idx,
                  seq_len
                  ):
-        super(GRUEncoder, self).__init__()
+        super(SimGRUEncoder, self).__init__()
         self.dict_size = dict_size
         self.emb_dim = emb_dim
         self.gru_dim = gru_dim
@@ -2071,7 +2073,7 @@ class GRUEncoder(Layer):
         gru = fluid.layers.tanh(gru)
         return gru


-class LSTMEncoder(Layer):
+class SimLSTMEncoder(Layer):
     """
     simple LSTMEncoder for simnet
     """
@@ -2087,7 +2089,7 @@ class LSTMEncoder(Layer):
         """
         initialize
         """
-        super(LSTMEncoder, self).__init__()
+        super(SimLSTMEncoder, self).__init__()
         self.dict_size = dict_size
         self.emb_dim = emb_dim
         self.lstm_dim = lstm_dim