未验证 提交 ad7ba363 编写于 作者: Z zhang wenhui 提交者: GitHub

Merge pull request #1480 from frankwhzhang/tagspace_dt

Add the TagSpace model, updated for PaddlePaddle Fluid 1.2 (network() now returns only avg_cost, correct, cos_pos)
......@@ -71,7 +71,7 @@ def train():
buffer_size=batch_size*100, is_train=True)
""" train network """
# Train program
text, pos_tag, neg_tag, avg_cost, correct, cos_pos = net.network(vocab_text_size, vocab_tag_size, neg_size=neg_size)
avg_cost, correct, cos_pos = net.network(vocab_text_size, vocab_tag_size, neg_size=neg_size)
# Optimization to minimize lost
sgd_optimizer = fluid.optimizer.SGD(learning_rate=args.base_lr)
......
......@@ -46,4 +46,4 @@ def network(vocab_text_size, vocab_tag_size, emb_dim=10, hid_dim=1000, win_size=
avg_cost = nn.mean(loss_part3)
less = tensor.cast(cf.less_than(cos_neg, cos_pos), dtype='float32')
correct = nn.reduce_sum(less)
return text, pos_tag, neg_tag, avg_cost, correct, cos_pos
return avg_cost, correct, cos_pos
\ No newline at end of file
......@@ -64,7 +64,7 @@ def train():
buffer_size=batch_size*100, is_train=True)
""" train network """
# Train program
text, pos_tag, neg_tag, avg_cost, correct, cos_pos = net.network(vocab_text_size, vocab_tag_size, neg_size=neg_size)
avg_cost, correct, cos_pos = net.network(vocab_text_size, vocab_tag_size, neg_size=neg_size)
# Optimization to minimize lost
sgd_optimizer = fluid.optimizer.Adagrad(learning_rate=args.base_lr)
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请先注册