Commit d9bc9da8 authored by dengkaipeng

fix drop rate in attention_lstm.py

Parent 67fb7c0d
@@ -34,6 +34,7 @@ class AttentionLSTM(ModelBase):
         self.num_classes = self.cfg.MODEL.num_classes
         self.embedding_size = self.cfg.MODEL.embedding_size
         self.lstm_size = self.cfg.MODEL.lstm_size
+        self.drop_rate = self.cfg.MODEL.drop_rate
         # get mode configs
         self.batch_size = self.get_config_from_sec(self.mode, 'batch_size', 1)
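The new attribute assumes the training config exposes a drop_rate field under its MODEL section, alongside num_classes, embedding_size and lstm_size. A minimal, hypothetical sketch of a config object that would satisfy the access pattern self.cfg.MODEL.drop_rate (the repo's actual YAML-based loader may differ):

# Hypothetical sketch only; the repo's real config loader may differ.
class AttrDict(dict):
    # dict whose keys can also be read as attributes
    def __getattr__(self, key):
        return self[key]

cfg = AttrDict(MODEL=AttrDict(
    num_classes=3862,     # example values, not taken from the repo's config
    embedding_size=512,
    lstm_size=1024,
    drop_rate=0.5))

print(cfg.MODEL.drop_rate)  # consumed above as self.drop_rate = self.cfg.MODEL.drop_rate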
@@ -87,7 +88,7 @@ class AttentionLSTM(ModelBase):
         for i, (input_dim, feature
                 ) in enumerate(zip(self.feature_dims, self.feature_input)):
             att = LSTMAttentionModel(input_dim, self.embedding_size,
-                                     self.lstm_size)
+                                     self.lstm_size, self.drop_rate)
             att_out = att.forward(feature, is_training=(self.mode == 'train'))
             att_outs.append(att_out)
         out = fluid.layers.concat(att_outs, axis=1)
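The call-site change above simply threads the configured rate into every per-feature attention branch. A self-contained toy sketch of that threading (the class body here is a stub, not the repo's implementation):

# Toy sketch: drop_rate now comes from the config and is forwarded to each
# per-feature LSTMAttentionModel, instead of the model defaulting to 0.5.
class LSTMAttentionModel(object):  # stub for illustration only
    def __init__(self, input_dim, embedding_size, lstm_size, drop_rate=0.5):
        self.drop_rate = drop_rate  # later used by the dropout layer

feature_dims = [1024, 128]   # e.g. rgb and audio branches (assumed values)
drop_rate = 0.7              # whatever cfg.MODEL.drop_rate specifies
branches = [LSTMAttentionModel(dim, 512, 1024, drop_rate) for dim in feature_dims]
assert all(b.drop_rate == drop_rate for b in branches)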
@@ -27,9 +27,7 @@ class LSTMAttentionModel(object):
                  drop_rate=0.5):
         self.lstm_size = lstm_size
         self.embedding_size = embedding_size
-        self.bias_attr = ParamAttr(
-            regularizer=fluid.regularizer.L2Decay(0.0),
-            initializer=fluid.initializer.NormalInitializer(scale=0.0))
+        self.drop_rate = drop_rate

     def forward(self, input, is_training):
         input_fc = fluid.layers.fc(
@@ -64,7 +62,7 @@ class LSTMAttentionModel(object):
             input=[lstm_forward, lstm_backward], axis=1)

         lstm_dropout = fluid.layers.dropout(
-            x=lstm_concat, dropout_prob=0.5, is_test=(not is_training))
+            x=lstm_concat, dropout_prob=self.drop_rate, is_test=(not is_training))

         lstm_weight = fluid.layers.fc(
             input=lstm_dropout,
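For context on what the configurable rate controls, a minimal sketch assuming the PaddlePaddle 1.x fluid API used in this repo: dropout zeroes activations with probability dropout_prob during training, and is_test=(not is_training) turns the layer into a pass-through at inference.

# Minimal sketch, assuming the PaddlePaddle 1.x fluid API; shapes are made up.
import paddle.fluid as fluid

drop_rate = 0.5    # stands in for cfg.MODEL.drop_rate
is_training = True

lstm_concat = fluid.layers.data(
    name='lstm_concat', shape=[2048], dtype='float32')
lstm_dropout = fluid.layers.dropout(
    x=lstm_concat,
    dropout_prob=drop_rate,          # configurable rate instead of hardcoded 0.5
    is_test=(not is_training))       # no-op when evaluating or doing inference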