From 6d637a93323336e4bc87cac4180982c1226f0caf Mon Sep 17 00:00:00 2001
From: lilu
Date: Thu, 16 May 2019 11:09:09 +0800
Subject: [PATCH] test=develop (#2249)

---
 .../auto_dialogue_evaluation/net.py | 35 ++++++++++---------
 1 file changed, 18 insertions(+), 17 deletions(-)

diff --git a/PaddleNLP/models/dialogue_model_toolkit/auto_dialogue_evaluation/net.py b/PaddleNLP/models/dialogue_model_toolkit/auto_dialogue_evaluation/net.py
index 22175ea9..80c77c22 100755
--- a/PaddleNLP/models/dialogue_model_toolkit/auto_dialogue_evaluation/net.py
+++ b/PaddleNLP/models/dialogue_model_toolkit/auto_dialogue_evaluation/net.py
@@ -11,22 +11,24 @@ import math
 import paddle.fluid as fluid
 import paddle
 
+
 class Network(object):
     """
     Network
     """
-    def __init__(self,
-                 vocab_size,
-                 emb_size,
-                 hidden_size,
-                 clip_value=10.0,
-                 word_emb_name="shared_word_emb",
-                 lstm_W_name="shared_lstm_W",
-                 lstm_bias_name="shared_lstm_bias"):
+
+    def __init__(self,
+                 vocab_size,
+                 emb_size,
+                 hidden_size,
+                 clip_value=10.0,
+                 word_emb_name="shared_word_emb",
+                 lstm_W_name="shared_lstm_W",
+                 lstm_bias_name="shared_lstm_bias"):
         """
         Init function
         """
-        
+
         self.vocab_size = vocab_size
         self.emb_size = emb_size
         self.hidden_size = hidden_size
@@ -44,8 +46,7 @@ class Network(object):
             name="context_wordseq", shape=[1], dtype="int64", lod_level=1)
         response_wordseq = fluid.layers.data(
             name="response_wordseq", shape=[1], dtype="int64", lod_level=1)
-        label = fluid.layers.data(
-            name="label", shape=[1], dtype="float32")
+        label = fluid.layers.data(name="label", shape=[1], dtype="float32")
 
         self._feed_name = ["context_wordseq", "response_wordseq", "label"]
         self._feed_infer_name = ["context_wordseq", "response_wordseq"]
@@ -58,7 +59,7 @@ class Network(object):
             param_attr=fluid.ParamAttr(
                 name=self.word_emb_name,
                 initializer=fluid.initializer.Normal(scale=0.1)))
-        
+
         response_emb = fluid.layers.embedding(
             input=response_wordseq,
             size=[self.vocab_size, self.emb_size],
@@ -66,7 +67,7 @@ class Network(object):
             param_attr=fluid.ParamAttr(
                 name=self.word_emb_name,
                 initializer=fluid.initializer.Normal(scale=0.1)))
-        
+
         #fc to fit dynamic LSTM
         context_fc = fluid.layers.fc(
             input=context_emb,
@@ -96,10 +97,10 @@ class Network(object):
             bias_attr=fluid.ParamAttr(name=self.lstm_bias_name))
         response_rep = fluid.layers.sequence_last_step(input=response_rep)
         print('response_rep shape: %s' % str(response_rep.shape))
-        
+
         logits = fluid.layers.bilinear_tensor_product(
             context_rep, response_rep, size=1)
-        print('logits shape: %s' % str(logits.shape)) #[batch,1]
+        print('logits shape: %s' % str(logits.shape))  #[batch,1]
 
         if loss_type == 'CLS':
             loss = fluid.layers.sigmoid_cross_entropy_with_logits(logits, label)
@@ -111,6 +112,7 @@ class Network(object):
         elif loss_type == 'L2':
             norm_score = 2 * fluid.layers.sigmoid(logits)
             loss = fluid.layers.square_error_cost(norm_score, label) / 4
+            loss = fluid.layers.reduce_mean(loss)
         else:
             raise ValueError
 
@@ -129,10 +131,9 @@ class Network(object):
         Return feed names
        """
         return self._feed_name
-    
+
    def get_feed_inference_names(self):
         """
         Return inference names
         """
         return self._feed_infer_name
- 
-- 
GitLab
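
The only behavioral change in this patch is the added fluid.layers.reduce_mean(loss) in the L2 branch; the remaining hunks are whitespace and formatting cleanup. reduce_mean collapses the per-example squared error into a single scalar loss that an optimizer can minimize directly. A minimal NumPy sketch of that arithmetic, for illustration only (the l2_loss helper and the sample arrays are hypothetical, not part of the patched file):

import numpy as np

def l2_loss(logits, label):
    # Mirror the patched L2 branch: 2 * sigmoid squashes raw logits into (0, 2),
    # the squared error against the label is scaled by 1/4, and the newly added
    # reduce_mean turns the per-example vector into one scalar loss value.
    norm_score = 2.0 / (1.0 + np.exp(-logits))
    per_example = np.square(norm_score - label) / 4.0
    return per_example.mean()

# Hypothetical batch of three scores with labels on the same (0, 2) scale.
print(l2_loss(np.array([0.5, -1.0, 2.0]), np.array([1.0, 0.0, 2.0])))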