diff --git a/fluid/adversarial/fluid_mnist.py b/fluid/adversarial/fluid_mnist.py
index dc116d7de52bfe4529c6fc977a9753440145b73c..edeb6b0269366392760795cf290b2e3492aff759 100644
--- a/fluid/adversarial/fluid_mnist.py
+++ b/fluid/adversarial/fluid_mnist.py
@@ -47,7 +47,9 @@ def main():
 
     optimizer = fluid.optimizer.Adam(learning_rate=0.01)
     optimizer.minimize(avg_cost)
-    accuracy = fluid.evaluator.Accuracy(input=logits, label=label)
+    batch_size = fluid.layers.create_tensor(dtype='int64')
+    batch_acc = fluid.layers.accuracy(
+        input=logits, label=label, total=batch_size)
 
     BATCH_SIZE = 50
     PASS_NUM = 3
@@ -63,20 +65,22 @@ def main():
     feeder = fluid.DataFeeder(feed_list=[img, label], place=place)
     exe.run(fluid.default_startup_program())
 
+    pass_acc = fluid.average.WeightedAverage()
     for pass_id in range(PASS_NUM):
-        accuracy.reset(exe)
+        pass_acc.reset()
         for data in train_reader():
-            loss, acc = exe.run(fluid.default_main_program(),
-                                feed=feeder.feed(data),
-                                fetch_list=[avg_cost] + accuracy.metrics)
-            pass_acc = accuracy.eval(exe)
-            print("pass_id=" + str(pass_id) + " acc=" + str(acc) + " pass_acc="
-                  + str(pass_acc))
+            loss, acc, b_size = exe.run(
+                fluid.default_main_program(),
+                feed=feeder.feed(data),
+                fetch_list=[avg_cost, batch_acc, batch_size])
+            pass_acc.add(value=acc, weight=b_size)
+            print("pass_id=" + str(pass_id) + " acc=" + str(acc[0]) +
+                  " pass_acc=" + str(pass_acc.eval()[0]))
             if loss < LOSS_THRESHOLD and pass_acc > ACC_THRESHOLD:
                 break
 
-        pass_acc = accuracy.eval(exe)
-        print("pass_id=" + str(pass_id) + " pass_acc=" + str(pass_acc))
+        print("pass_id=" + str(pass_id) + " pass_acc=" + str(pass_acc.eval()[
+            0]))
     fluid.io.save_params(
         exe, dirname='./mnist', main_program=fluid.default_main_program())
     print('train mnist done')
diff --git a/fluid/image_classification/mobilenet.py b/fluid/image_classification/mobilenet.py
index 48d266c02b94155e07d80f7c401987a22ac7c906..adfd6868f45db8d4e6751973b5f2ad91fe7d4048 100644
--- a/fluid/image_classification/mobilenet.py
+++ b/fluid/image_classification/mobilenet.py
@@ -172,15 +172,16 @@ def train(learning_rate, batch_size, num_passes, model_save_dir='model'):
         momentum=0.9,
         regularization=fluid.regularizer.L2Decay(5 * 1e-5))
     opts = optimizer.minimize(avg_cost)
-    accuracy = fluid.evaluator.Accuracy(input=out, label=label)
+
+    b_size_var = fluid.layers.create_tensor(dtype='int64')
+    b_acc_var = fluid.layers.accuracy(input=out, label=label, total=b_size_var)
 
     inference_program = fluid.default_main_program().clone()
     with fluid.program_guard(inference_program):
-        test_accuracy = fluid.evaluator.Accuracy(input=out, label=label)
-        test_target = [avg_cost] + test_accuracy.metrics + test_accuracy.states
-        inference_program = fluid.io.get_inference_program(test_target)
+        inference_program = fluid.io.get_inference_program(
+            target_vars=[b_acc_var, b_size_var])
 
-    place = fluid.CUDAPlace(0)
+    place = fluid.CPUPlace()
     exe = fluid.Executor(place)
     exe.run(fluid.default_startup_program())
 
@@ -190,24 +191,29 @@ def train(learning_rate, batch_size, num_passes, model_save_dir='model'):
         paddle.dataset.flowers.test(), batch_size=batch_size)
     feeder = fluid.DataFeeder(place=place, feed_list=[image, label])
 
+    train_pass_acc_evaluator = fluid.average.WeightedAverage()
+    test_pass_acc_evaluator = fluid.average.WeightedAverage()
     for pass_id in range(num_passes):
-        accuracy.reset(exe)
+        train_pass_acc_evaluator.reset()
        for batch_id, data in enumerate(train_reader()):
-            loss, acc = exe.run(fluid.default_main_program(),
-                                feed=feeder.feed(data),
-                                fetch_list=[avg_cost] + accuracy.metrics)
+            loss, acc, size = exe.run(
+                fluid.default_main_program(),
+                feed=feeder.feed(data),
+                fetch_list=[avg_cost, b_acc_var, b_size_var])
+            train_pass_acc_evaluator.add(value=acc, weight=size)
             print("Pass {0}, batch {1}, loss {2}, acc {3}".format(
                 pass_id, batch_id, loss[0], acc[0]))
 
-        pass_acc = accuracy.eval(exe)
-        test_accuracy.reset(exe)
+        test_pass_acc_evaluator.reset()
         for data in test_reader():
-            loss, acc = exe.run(inference_program,
-                                feed=feeder.feed(data),
-                                fetch_list=[avg_cost] + test_accuracy.metrics)
-        test_pass_acc = test_accuracy.eval(exe)
+            loss, acc, size = exe.run(
+                inference_program,
+                feed=feeder.feed(data),
+                fetch_list=[avg_cost, b_acc_var, b_size_var])
+            test_pass_acc_evaluator.add(value=acc, weight=size)
         print("End pass {0}, train_acc {1}, test_acc {2}".format(
-            pass_id, pass_acc, test_pass_acc))
+            pass_id,
+            train_pass_acc_evaluator.eval(), test_pass_acc_evaluator.eval()))
         if pass_id % 10 == 0:
             model_path = os.path.join(model_save_dir, str(pass_id))
             print 'save models to %s' % (model_path)
diff --git a/fluid/text_classification/train.py b/fluid/text_classification/train.py
index d0c9c34f02a0435fe7b6c390189aa921a6beef02..d32e1c4c878f4d6ef554cc27e0fc5ffc99f96a4a 100644
--- a/fluid/text_classification/train.py
+++ b/fluid/text_classification/train.py
@@ -89,12 +89,14 @@ def main(dict_path):
 
     sgd_optimizer = fluid.optimizer.SGD(learning_rate=conf.learning_rate)
     sgd_optimizer.minimize(avg_cost)
-    accuracy = fluid.evaluator.Accuracy(input=prediction, label=label)
+    batch_size_var = fluid.layers.create_tensor(dtype='int64')
+    batch_acc_var = fluid.layers.accuracy(
+        input=prediction, label=label, total=batch_size_var)
 
     inference_program = fluid.default_main_program().clone()
     with fluid.program_guard(inference_program):
-        test_target = accuracy.metrics + accuracy.states
-        inference_program = fluid.io.get_inference_program(test_target)
+        inference_program = fluid.io.get_inference_program(
+            target_vars=[batch_acc_var, batch_size_var])
 
     # The training data set.
     train_reader = paddle.batch(
@@ -119,31 +121,37 @@ def main(dict_path):
 
     exe.run(fluid.default_startup_program())
 
+    train_pass_acc_evaluator = fluid.average.WeightedAverage()
+    test_pass_acc_evaluator = fluid.average.WeightedAverage()
+
     def test(exe):
-        accuracy.reset(exe)
+        test_pass_acc_evaluator.reset()
         for batch_id, data in enumerate(test_reader()):
             input_seq = to_lodtensor(map(lambda x: x[0], data), place)
             y_data = np.array(map(lambda x: x[1], data)).astype("int64")
             y_data = y_data.reshape([-1, 1])
-            acc = exe.run(inference_program,
-                          feed={"words": input_seq,
-                                "label": y_data})
-        test_acc = accuracy.eval(exe)
+            b_acc, b_size = exe.run(inference_program,
+                                    feed={"words": input_seq,
+                                          "label": y_data},
+                                    fetch_list=[batch_acc_var, batch_size_var])
+            test_pass_acc_evaluator.add(value=b_acc, weight=b_size)
+        test_acc = test_pass_acc_evaluator.eval()
         return test_acc
 
     total_time = 0.
     for pass_id in xrange(conf.num_passes):
-        accuracy.reset(exe)
+        train_pass_acc_evaluator.reset()
         start_time = time.time()
         for batch_id, data in enumerate(train_reader()):
-            cost_val, acc_val = exe.run(
+            cost_val, acc_val, size_val = exe.run(
                 fluid.default_main_program(),
                 feed=feeder.feed(data),
-                fetch_list=[avg_cost, accuracy.metrics[0]])
-            pass_acc = accuracy.eval(exe)
+                fetch_list=[avg_cost, batch_acc_var, batch_size_var])
+            train_pass_acc_evaluator.add(value=acc_val, weight=size_val)
             if batch_id and batch_id % conf.log_period == 0:
-                print("Pass id: %d, batch id: %d, cost: %f, pass_acc %f" %
-                      (pass_id, batch_id, cost_val, pass_acc))
+                print("Pass id: %d, batch id: %d, cost: %f, pass_acc: %f" %
+                      (pass_id, batch_id, cost_val,
+                       train_pass_acc_evaluator.eval()))
         end_time = time.time()
         total_time += (end_time - start_time)
         pass_test_acc = test(exe)
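
Note: all three files switch from the removed fluid.evaluator.Accuracy to the same pattern: fluid.layers.accuracy computes the per-batch accuracy and writes the batch size into a tensor created with fluid.layers.create_tensor, and fluid.average.WeightedAverage accumulates the per-pass accuracy on the Python side. A minimal sketch of this shared pattern follows; variable names are illustrative, and it assumes the surrounding setup (prediction, label, avg_cost, exe, feeder, train_reader) already exists as in the files above.

    # build the batch-level accuracy op; `total` receives the batch size
    batch_size = fluid.layers.create_tensor(dtype='int64')
    batch_acc = fluid.layers.accuracy(
        input=prediction, label=label, total=batch_size)

    # aggregate per-batch accuracy into a per-pass accuracy on the host
    pass_acc = fluid.average.WeightedAverage()
    pass_acc.reset()  # once at the start of each pass
    for data in train_reader():
        loss, acc, size = exe.run(fluid.default_main_program(),
                                  feed=feeder.feed(data),
                                  fetch_list=[avg_cost, batch_acc, batch_size])
        pass_acc.add(value=acc, weight=size)  # weight each batch by its size
    print("pass accuracy: %f" % pass_acc.eval())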