diff --git a/generate_sequence_by_rnn_lm/network_conf.py b/generate_sequence_by_rnn_lm/network_conf.py
index f1aceb0b7d70c6a2aec601cf935d2f34500d20fc..f2e59a94298435f280a561a1e079defa2ea84f62 100644
--- a/generate_sequence_by_rnn_lm/network_conf.py
+++ b/generate_sequence_by_rnn_lm/network_conf.py
@@ -57,4 +57,4 @@ def rnn_lm(vocab_dim,
     else:
         cost = paddle.layer.classification_cost(input=output, label=target)
 
-    return cost, output
+    return cost
diff --git a/generate_sequence_by_rnn_lm/train.py b/generate_sequence_by_rnn_lm/train.py
index d8bffd487a6b8c0b74d28a97b16c689255dd472b..198835829812426f49682b3fdb43f6c35c55a319 100644
--- a/generate_sequence_by_rnn_lm/train.py
+++ b/generate_sequence_by_rnn_lm/train.py
@@ -43,9 +43,14 @@ def train(topology,
     # create parameters
     parameters = paddle.parameters.create(topology)
+    # create sum evaluator
+    sum_eval = paddle.evaluator.sum(topology)
 
     # create trainer
     trainer = paddle.trainer.SGD(
-        cost=topology, parameters=parameters, update_equation=adam_optimizer)
+        cost=topology,
+        parameters=parameters,
+        update_equation=adam_optimizer,
+        extra_layers=sum_eval)
 
     # define the event_handler callback
     def event_handler(event):