Commit b6f542dd authored by Steffy-zxf

update doc_string

Parent 62520440
@@ -70,7 +70,7 @@ class Word2vecSkipGram(hub.Module):
         Returns:
             inputs(dict): the input variables of word2vec_skipgram (words)
             outputs(dict): the output variables of input words (word embeddings)
-            main_program(Program): the main_program of Senta with pretrained parameters
+            main_program(Program): the main_program of word2vec_skipgram with pretrained parameters
         """
         assert num_data >= 1 and num_data <= 3, "num_data(%d) must be 1, 2, or 3" % num_data
         main_program = fluid.Program()
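The `context()` convention this docstring describes is how downstream code obtains feed/fetch variables from a pretrained module. Below is a minimal, hypothetical sketch in the PaddleHub 1.x style; the dictionary keys and the `trainable` argument are assumptions, not taken from this diff:

    import paddlehub as hub

    # Load the module and pull out its pretrained program (hypothetical usage).
    module = hub.Module(name="word2vec_skipgram")
    inputs, outputs, main_program = module.context(trainable=False)

    # "inputs" maps names to feed variables for the words; "outputs" maps
    # names to the matching word-embedding variables. The keys below are
    # assumptions for illustration only.
    word_ids = inputs["text"]
    word_emb = outputs["emb"]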
@@ -61,7 +61,7 @@ class EmotionDetectionTextCNN(hub.NLPPredictionModule):
             inputs(dict): the input variables of emotion_detection_textcnn (words)
             outputs(dict): the output variables of input words (word embeddings and label probabilities);
                 the sentence embedding and sequence length of the first input text.
-            main_program(Program): the main_program of Senta with pretrained parameters
+            main_program(Program): the main_program of emotion_detection_textcnn with pretrained parameters
         """
         assert num_data >= 1 and num_data <= 3, "num_data(%d) must be 1, 2, or 3" % num_data
         main_program = fluid.Program()
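Both modules assert `1 <= num_data <= 3`, i.e. `context()` can build feed/fetch variables for up to three parallel text slots. A hypothetical sketch of requesting two slots; every key name here is an assumption:

    import paddlehub as hub

    module = hub.Module(name="emotion_detection_textcnn")
    inputs, outputs, main_program = module.context(num_data=2)

    # Per the docstring, outputs carries word embeddings and label
    # probabilities, plus the sentence embedding and sequence length of the
    # first input text; the exact keys below are assumptions.
    sent_emb = outputs["sentence_feature"]
    first_seq_len = outputs["seq_len"]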
@@ -2,8 +2,8 @@
 import paddle.fluid as fluid
 
-def bilstm_net(data,
-               dict_dim,
+def bilstm_net(emb,
+               seq_len,
                emb_dim=128,
                hid_dim=128,
                hid_dim2=96,
@@ -12,15 +12,12 @@ def bilstm_net(data,
     """
     Bi-Lstm net
     """
-    # embedding layer
-    emb = fluid.layers.embedding(
-        input=data,
-        size=[dict_dim, emb_dim],
-    )
+    # unpad the token_feature
+    unpad_feature = fluid.layers.sequence_unpad(emb, length=seq_len)
 
     # bi-lstm layer
-    fc0 = fluid.layers.fc(input=emb, size=hid_dim * 4)
-    rfc0 = fluid.layers.fc(input=emb, size=hid_dim * 4)
+    fc0 = fluid.layers.fc(input=unpad_feature, size=hid_dim * 4)
+    rfc0 = fluid.layers.fc(input=unpad_feature, size=hid_dim * 4)
     lstm_h, c = fluid.layers.dynamic_lstm(
         input=fc0, size=hid_dim * 4, is_reverse=False)
     rlstm_h, c = fluid.layers.dynamic_lstm(
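The net changes in this and the following files all follow one pattern: the embedding lookup moves out of the net, which now receives a padded embedding tensor plus the true sequence lengths and un-pads it before any sequence op runs. A minimal sketch of the new calling convention, assuming Paddle >= 1.6 for `fluid.data`; the names, shapes, and vocabulary size are placeholders:

    import paddle.fluid as fluid

    MAX_SEQ_LEN, VOCAB_SIZE, EMB_DIM = 128, 30000, 128  # assumed values

    # Padded word ids plus the real (pre-padding) length of each example.
    data = fluid.data(name="words", shape=[None, MAX_SEQ_LEN], dtype="int64")
    seq_len = fluid.data(name="seq_len", shape=[None], dtype="int64")

    # The lookup now happens once, outside the net...
    emb = fluid.layers.embedding(input=data, size=[VOCAB_SIZE, EMB_DIM])

    # ...and the net un-pads it via sequence_unpad before its recurrent
    # layers, so padding never reaches the LSTM.
    probs = bilstm_net(emb, seq_len)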
@@ -2,15 +2,15 @@
 import paddle.fluid as fluid
 
-def bow_net(data, dict_dim, emb_dim=128, hid_dim=128, hid_dim2=96, class_dim=2):
+def bow_net(emb, seq_len, hid_dim=128, hid_dim2=96, class_dim=2):
     """
     Bow net
     """
-    # embedding layer
-    emb = fluid.layers.embedding(input=data, size=[dict_dim, emb_dim])
+    # unpad the token_feature
+    unpad_feature = fluid.layers.sequence_unpad(emb, length=seq_len)
 
     # bow layer
-    bow = fluid.layers.sequence_pool(input=emb, pool_type='sum')
+    bow = fluid.layers.sequence_pool(input=unpad_feature, pool_type='sum')
     bow_tanh = fluid.layers.tanh(bow)
 
     # full connect layer
     fc_1 = fluid.layers.fc(input=bow_tanh, size=hid_dim, act="tanh")
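Why `pool_type='sum'`: after un-padding, `sequence_pool` collapses each sequence into a single vector, which for sum pooling is exactly the bag-of-words representation. A toy numpy illustration of the same arithmetic (not Paddle code):

    import numpy as np

    # Three token embeddings of dimension 2 for one sequence...
    emb = np.array([[1.0, 0.0],
                    [0.5, 2.0],
                    [0.0, 1.0]])

    # ...summed over time: what pool_type='sum' computes per sequence.
    bow = emb.sum(axis=0)
    print(bow)  # [1.5 3. ]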
@@ -2,22 +2,16 @@
 import paddle.fluid as fluid
 
-def cnn_net(data,
-            dict_dim,
-            emb_dim=128,
-            hid_dim=128,
-            hid_dim2=96,
-            class_dim=2,
-            win_size=3):
+def cnn_net(emb, seq_len, hid_dim=128, hid_dim2=96, class_dim=2, win_size=3):
     """
     Conv net
     """
-    # embedding layer
-    emb = fluid.layers.embedding(input=data, size=[dict_dim, emb_dim])
+    # unpad the token_feature
+    unpad_feature = fluid.layers.sequence_unpad(emb, length=seq_len)
 
     # convolution layer
     conv_3 = fluid.nets.sequence_conv_pool(
-        input=emb,
+        input=unpad_feature,
         num_filters=hid_dim,
         filter_size=win_size,
         act="tanh",
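`sequence_conv_pool` convolves every `win_size`-token window and then max-pools over time, which is why feeding it the un-padded feature matters: pad positions would otherwise enter the windows. A toy numpy sketch of the same computation with illustrative shapes (not Paddle code):

    import numpy as np

    tokens = np.random.rand(5, 8)     # 5 tokens, embedding dim 8
    filt = np.random.rand(3 * 8, 4)   # win_size=3 windows -> 4 filters

    # Slide a 3-token window over the sequence, flatten each window,
    # project it through the filters, then max-pool over time.
    windows = np.stack(
        [tokens[i:i + 3].reshape(-1) for i in range(5 - 3 + 1)])
    feats = np.tanh(windows @ filt)   # (3 windows, 4 filters)
    pooled = feats.max(axis=0)        # (4,) one value per filter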
@@ -2,8 +2,8 @@
 import paddle.fluid as fluid
 
-def gru_net(data,
-            dict_dim,
+def gru_net(emb,
+            seq_len,
             emb_dim=128,
             hid_dim=128,
             hid_dim2=96,
@@ -12,10 +12,10 @@ def gru_net(data,
     """
     gru net
     """
-    # embedding layer
-    emb = fluid.layers.embedding(input=data, size=[dict_dim, emb_dim])
+    # unpad the token_feature
+    unpad_feature = fluid.layers.sequence_unpad(emb, length=seq_len)
 
-    fc0 = fluid.layers.fc(input=emb, size=hid_dim * 3)
+    fc0 = fluid.layers.fc(input=unpad_feature, size=hid_dim * 3)
 
     # GRU layer
     gru_h = fluid.layers.dynamic_gru(input=fc0, size=hid_dim, is_reverse=False)
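The `hid_dim * 3` above is not arbitrary: `dynamic_gru` fuses the update-gate, reset-gate, and candidate-state projections, so its input must already be three times the hidden size. A stand-alone fragment showing the required pairing; the input variable is a placeholder:

    import paddle.fluid as fluid

    hid_dim = 128
    # Placeholder variable-length (LoD) input so the fragment builds on its own.
    feat = fluid.layers.data(
        name="feat", shape=[128], dtype="float32", lod_level=1)

    # fc pre-projects to 3 * hidden size: one slice per fused GRU projection.
    fc0 = fluid.layers.fc(input=feat, size=hid_dim * 3)
    gru_h = fluid.layers.dynamic_gru(input=fc0, size=hid_dim, is_reverse=False)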
@@ -2,20 +2,14 @@
 import paddle.fluid as fluid
 
-def lstm_net(data,
-             dict_dim,
-             emb_dim=128,
-             hid_dim=128,
-             hid_dim2=96,
-             class_dim=2,
-             emb_lr=30.0):
+def lstm_net(emb, seq_len, hid_dim=128, hid_dim2=96, class_dim=2, emb_lr=30.0):
     """
     Lstm net
     """
-    # embedding layer
-    emb = fluid.layers.embedding(input=data, size=[dict_dim, emb_dim])
+    # unpad the token_feature
+    unpad_feature = fluid.layers.sequence_unpad(emb, length=seq_len)
 
     # Lstm layer
-    fc0 = fluid.layers.fc(input=emb, size=hid_dim * 4)
+    fc0 = fluid.layers.fc(input=unpad_feature, size=hid_dim * 4)
     lstm_h, c = fluid.layers.dynamic_lstm(
         input=fc0, size=hid_dim * 4, is_reverse=False)
 
     # max pooling layer
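Analogously, `dynamic_lstm` fuses four projections (input, forget, and output gates plus the cell candidate), hence the `hid_dim * 4` fc; note that its own `size` argument is the same fused 4x value, while the returned hidden and cell sequences have width `hid_dim`. A stand-alone fragment with a placeholder input:

    import paddle.fluid as fluid

    hid_dim = 128
    # Placeholder variable-length (LoD) input so the fragment builds on its own.
    feat = fluid.layers.data(
        name="feat", shape=[128], dtype="float32", lod_level=1)

    # fc pre-projects to 4 * hidden size: input/forget/output gates plus the
    # cell candidate; dynamic_lstm's size argument is that same 4x value.
    fc0 = fluid.layers.fc(input=feat, size=hid_dim * 4)
    lstm_h, cell = fluid.layers.dynamic_lstm(
        input=fc0, size=hid_dim * 4, is_reverse=False)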