diff --git a/demo/mnist/api_train_v2.py b/demo/mnist/api_train_v2.py
index 6439d07ac6157dbbe505c1a3f59bda1b681ee2ae..341a7165da4e2f6bce1d37bfecb9b58abedc44a1 100644
--- a/demo/mnist/api_train_v2.py
+++ b/demo/mnist/api_train_v2.py
@@ -63,6 +63,8 @@ def main():
     label = paddle.layer.data(
         name='label', type=paddle.data_type.integer_value(10))
 
+    # Here we can build the prediction network in different ways. Please
+    # choose one by uncommenting the corresponding line.
     predict = softmax_regression(images)
     #predict = multilayer_perceptron(images)
     #predict = convolutional_neural_network(images)
@@ -80,14 +82,20 @@ def main():
         parameters=parameters,
         update_equation=optimizer)
 
+    list = []
+
     def event_handler(event):
         if isinstance(event, paddle.event.EndIteration):
             if event.batch_id % 100 == 0:
-                result = trainer.test(reader=paddle.reader.batched(
-                    paddle.dataset.mnist.test(), batch_size=128))
-                print "Pass %d, Batch %d, Cost %f, %s, Testing metrics %s" % (
-                    event.pass_id, event.batch_id, event.cost, event.metrics,
-                    result.metrics)
+                print "Pass %d, Batch %d, Cost %f, %s" % (
+                    event.pass_id, event.batch_id, event.cost, event.metrics)
+        if isinstance(event, paddle.event.EndPass):
+            result = trainer.test(reader=paddle.reader.batched(
+                paddle.dataset.mnist.test(), batch_size=128))
+            print "Test with Pass %d, Cost %f, %s\n" % (
+                event.pass_id, event.cost, result.metrics)
+            list.append((event.pass_id, event.cost,
+                         result.metrics['classification_error_evaluator']))
 
     trainer.train(
         reader=paddle.reader.batched(
@@ -97,10 +105,15 @@ def main():
         event_handler=event_handler,
         num_passes=100)
 
+    # find the best pass
+    best = sorted(list, key=lambda list: float(list[1]))[0]
+    print 'Best pass is %s, testing Avgcost is %s' % (best[0], best[1])
+    print 'The classification accuracy is %.2f%%' % (100 - float(best[2]) * 100)
+
     # output is a softmax layer. It returns probabilities.
     # Shape should be (100, 10)
     probs = paddle.infer(
-        output=inference,
+        output=predict,
         parameters=parameters,
         reader=paddle.reader.batched(
             paddle.reader.firstn(
diff --git a/python/paddle/v2/event.py b/python/paddle/v2/event.py
index a78bcf076cc65e0dfdfc5760e099900418162f35..6a7bcb81879c0a364a1d896071055a6247e96c19 100644
--- a/python/paddle/v2/event.py
+++ b/python/paddle/v2/event.py
@@ -52,8 +52,9 @@ class EndPass(WithMetric):
     Event On One Pass Training Complete.
     """
 
-    def __init__(self, pass_id, evaluator):
+    def __init__(self, pass_id, cost, evaluator):
         self.pass_id = pass_id
+        self.cost = cost
         WithMetric.__init__(self, evaluator)
 
 
diff --git a/python/paddle/v2/trainer.py b/python/paddle/v2/trainer.py
index e743a49523ff21627ea2abfb76cee8b9ffd685e2..a4ef0df597dd7f52833a0176333dabc93d02c18d 100644
--- a/python/paddle/v2/trainer.py
+++ b/python/paddle/v2/trainer.py
@@ -107,6 +107,8 @@ class SGD(ITrainer):
             event_handler(v2_event.BeginPass(pass_id))
             pass_evaluator.start()
             updater.startPass()
+            total_cost_sum = 0
+            total_batch = 0
             for batch_id, data_batch in enumerate(reader()):
                 pass_type = updater.startBatch(len(data_batch))
                 self.__gradient_machine__.forwardBackward(
@@ -127,6 +129,8 @@ class SGD(ITrainer):
                 cost_vec = out_args.getSlotValue(0)
                 cost_vec = cost_vec.copyToNumpyMat()
                 cost = cost_vec.sum() / len(data_batch)
+                total_cost_sum += cost_vec.sum()
+                total_batch += len(data_batch)
                 updater.finishBatch(cost)
                 batch_evaluator.finish()
                 event_handler(
@@ -138,7 +142,11 @@ class SGD(ITrainer):
 
             updater.finishPass()
             pass_evaluator.finish()
-            event_handler(v2_event.EndPass(pass_id, evaluator=pass_evaluator))
+            event_handler(
+                v2_event.EndPass(
+                    pass_id,
+                    cost=total_cost_sum / total_batch,
+                    evaluator=pass_evaluator))
         self.__gradient_machine__.finish()
 
     def default_reader_dict(self):