diff --git a/demo/mnist/api_train_v2.py b/demo/mnist/api_train_v2.py
index 06beb7024d1fd07dc327cb4c09d74e1b89a7b8ff..575a32b3222a83afa0f01bef59037dca102773e7 100644
--- a/demo/mnist/api_train_v2.py
+++ b/demo/mnist/api_train_v2.py
@@ -30,10 +30,11 @@ def main():
             result = trainer.test(reader=paddle.reader.batched(
                 paddle.dataset.mnist.test(), batch_size=256))
 
-            print "Pass %d, Batch %d, Cost %f, %s, Testing metrics %s" % (
-                event.pass_id, event.batch_id, event.cost, event.metrics,
-                result.metrics)
-
+            print "Pass %d, Batch %d, Cost %.2f, %s\n" \
+                  "Testing cost %.2f metrics %s" % (
+                      event.pass_id, event.batch_id, event.cost,
+                      event.metrics,
+                      result.cost, result.metrics)
         else:
             pass
diff --git a/python/paddle/v2/event.py b/python/paddle/v2/event.py
index a78bcf076cc65e0dfdfc5760e099900418162f35..a429e36b63c9e812332673b66f4d8b99f3303cf8 100644
--- a/python/paddle/v2/event.py
+++ b/python/paddle/v2/event.py
@@ -34,8 +34,9 @@ class WithMetric(object):
 
 
 class TestResult(WithMetric):
-    def __init__(self, evaluator):
+    def __init__(self, evaluator, cost):
         super(TestResult, self).__init__(evaluator)
+        self.cost = cost
 
 
 class BeginPass(object):
diff --git a/python/paddle/v2/trainer.py b/python/paddle/v2/trainer.py
index abaad1d0213c63f92a72b30158d41fd41119d078..b4a713f7d53c2c5aef7e356906f88475037fa8d2 100644
--- a/python/paddle/v2/trainer.py
+++ b/python/paddle/v2/trainer.py
@@ -120,10 +120,8 @@ class SGD(ITrainer):
                 for each_param in self.__gradient_machine__.getNonStaticParameters(
                 ):
                     updater.update(each_param)
-                # Get cost. We use numpy to calculate total cost for this batch.
-                cost_vec = out_args.getSlotValue(0)
-                cost_vec = cost_vec.copyToNumpyMat()
-                cost = cost_vec.sum() / len(data_batch)
+                cost_sum = out_args.sumCosts()
+                cost = cost_sum / len(data_batch)
                 updater.finishBatch(cost)
                 batch_evaluator.finish()
                 event_handler(
@@ -152,13 +150,18 @@ class SGD(ITrainer):
         evaluator = self.__gradient_machine__.makeEvaluator()
         out_args = api.Arguments.createArguments(0)
         evaluator.start()
+        total_cost = 0
+        num_samples = 0.0
         for data_batch in reader():
+            num_samples += len(data_batch)
             self.__gradient_machine__.forward(
                 feeder(data_batch), out_args, api.PASS_TEST)
+            total_cost += out_args.sumCosts()
             self.__gradient_machine__.eval(evaluator)
         evaluator.finish()
-        return v2_event.TestResult(evaluator=evaluator)
+        return v2_event.TestResult(
+            evaluator=evaluator, cost=total_cost / num_samples)
 
 
 def __check_train_args__(reader, event_handler, **kwargs):