Unverified · Commit 131f0bae authored by fengjiayi, committed by GitHub

Merge pull request #704 from JiayiFeng/remove_Accuracy

Remove Accuracy
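This PR removes the `fluid.evaluator.Accuracy` wrapper from the example scripts. Each script now builds the per-batch accuracy directly with `fluid.layers.accuracy`, which also writes the number of samples in the batch into the tensor passed as `total`, and aggregates the per-pass accuracy on the Python side with `fluid.average.WeightedAverage`. A minimal sketch of the resulting training-loop pattern, assuming the fluid API used in this PR and that the network variables (`logits`, `label`, `avg_cost`), the executor, the feeder, and `train_reader` are already defined as in the MNIST hunk below:

```python
import paddle.fluid as fluid

# Assumed to exist, as in the MNIST hunk below: logits, label, avg_cost,
# exe (Executor), feeder (DataFeeder), train_reader.
batch_size = fluid.layers.create_tensor(dtype='int64')  # filled by the accuracy op
batch_acc = fluid.layers.accuracy(
    input=logits, label=label, total=batch_size)         # per-batch accuracy

pass_acc = fluid.average.WeightedAverage()               # Python-side accumulator
pass_acc.reset()
for data in train_reader():
    loss, acc, size = exe.run(fluid.default_main_program(),
                              feed=feeder.feed(data),
                              fetch_list=[avg_cost, batch_acc, batch_size])
    # Weight each batch's accuracy by its size so the pass average is exact.
    pass_acc.add(value=acc, weight=size)
print("pass_acc=" + str(pass_acc.eval()))
```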
MNIST training example:

```diff
@@ -47,7 +47,9 @@ def main():
     optimizer = fluid.optimizer.Adam(learning_rate=0.01)
     optimizer.minimize(avg_cost)
 
-    accuracy = fluid.evaluator.Accuracy(input=logits, label=label)
+    batch_size = fluid.layers.create_tensor(dtype='int64')
+    batch_acc = fluid.layers.accuracy(
+        input=logits, label=label, total=batch_size)
 
     BATCH_SIZE = 50
     PASS_NUM = 3
@@ -63,20 +65,22 @@ def main():
     feeder = fluid.DataFeeder(feed_list=[img, label], place=place)
     exe.run(fluid.default_startup_program())
 
+    pass_acc = fluid.average.WeightedAverage()
     for pass_id in range(PASS_NUM):
-        accuracy.reset(exe)
+        pass_acc.reset()
         for data in train_reader():
-            loss, acc = exe.run(fluid.default_main_program(),
-                                feed=feeder.feed(data),
-                                fetch_list=[avg_cost] + accuracy.metrics)
-            pass_acc = accuracy.eval(exe)
-            print("pass_id=" + str(pass_id) + " acc=" + str(acc) + " pass_acc="
-                  + str(pass_acc))
+            loss, acc, b_size = exe.run(
+                fluid.default_main_program(),
+                feed=feeder.feed(data),
+                fetch_list=[avg_cost, batch_acc, batch_size])
+            pass_acc.add(value=acc, weight=b_size)
+            print("pass_id=" + str(pass_id) + " acc=" + str(acc[0]) +
+                  " pass_acc=" + str(pass_acc.eval()[0]))
             if loss < LOSS_THRESHOLD and pass_acc > ACC_THRESHOLD:
                 break
 
-        pass_acc = accuracy.eval(exe)
-        print("pass_id=" + str(pass_id) + " pass_acc=" + str(pass_acc))
+        print("pass_id=" + str(pass_id) + " pass_acc=" + str(pass_acc.eval()[
+            0]))
         fluid.io.save_params(
             exe, dirname='./mnist', main_program=fluid.default_main_program())
     print('train mnist done')
```
Image classification example (flowers dataset):

```diff
@@ -172,15 +172,16 @@ def train(learning_rate, batch_size, num_passes, model_save_dir='model'):
         momentum=0.9,
         regularization=fluid.regularizer.L2Decay(5 * 1e-5))
     opts = optimizer.minimize(avg_cost)
-    accuracy = fluid.evaluator.Accuracy(input=out, label=label)
+    b_size_var = fluid.layers.create_tensor(dtype='int64')
+    b_acc_var = fluid.layers.accuracy(input=out, label=label, total=b_size_var)
 
     inference_program = fluid.default_main_program().clone()
     with fluid.program_guard(inference_program):
-        test_accuracy = fluid.evaluator.Accuracy(input=out, label=label)
-        test_target = [avg_cost] + test_accuracy.metrics + test_accuracy.states
-        inference_program = fluid.io.get_inference_program(test_target)
+        inference_program = fluid.io.get_inference_program(
+            target_vars=[b_acc_var, b_size_var])
 
-    place = fluid.CUDAPlace(0)
+    place = fluid.CPUPlace()
     exe = fluid.Executor(place)
     exe.run(fluid.default_startup_program())
@@ -190,24 +191,29 @@ def train(learning_rate, batch_size, num_passes, model_save_dir='model'):
         paddle.dataset.flowers.test(), batch_size=batch_size)
     feeder = fluid.DataFeeder(place=place, feed_list=[image, label])
 
+    train_pass_acc_evaluator = fluid.average.WeightedAverage()
+    test_pass_acc_evaluator = fluid.average.WeightedAverage()
     for pass_id in range(num_passes):
-        accuracy.reset(exe)
+        train_pass_acc_evaluator.reset()
         for batch_id, data in enumerate(train_reader()):
-            loss, acc = exe.run(fluid.default_main_program(),
-                                feed=feeder.feed(data),
-                                fetch_list=[avg_cost] + accuracy.metrics)
+            loss, acc, size = exe.run(
+                fluid.default_main_program(),
+                feed=feeder.feed(data),
+                fetch_list=[avg_cost, b_acc_var, b_size_var])
+            train_pass_acc_evaluator.add(value=acc, weight=size)
             print("Pass {0}, batch {1}, loss {2}, acc {3}".format(
                 pass_id, batch_id, loss[0], acc[0]))
-        pass_acc = accuracy.eval(exe)
 
-        test_accuracy.reset(exe)
+        test_pass_acc_evaluator.reset()
         for data in test_reader():
-            loss, acc = exe.run(inference_program,
-                                feed=feeder.feed(data),
-                                fetch_list=[avg_cost] + test_accuracy.metrics)
-        test_pass_acc = test_accuracy.eval(exe)
+            loss, acc, size = exe.run(
+                inference_program,
+                feed=feeder.feed(data),
+                fetch_list=[avg_cost, b_acc_var, b_size_var])
+            test_pass_acc_evaluator.add(value=acc, weight=size)
         print("End pass {0}, train_acc {1}, test_acc {2}".format(
-            pass_id, pass_acc, test_pass_acc))
+            pass_id,
+            train_pass_acc_evaluator.eval(), test_pass_acc_evaluator.eval()))
 
         if pass_id % 10 == 0:
             model_path = os.path.join(model_save_dir, str(pass_id))
             print 'save models to %s' % (model_path)
```
Text classification example:

```diff
@@ -89,12 +89,14 @@ def main(dict_path):
     sgd_optimizer = fluid.optimizer.SGD(learning_rate=conf.learning_rate)
     sgd_optimizer.minimize(avg_cost)
 
-    accuracy = fluid.evaluator.Accuracy(input=prediction, label=label)
+    batch_size_var = fluid.layers.create_tensor(dtype='int64')
+    batch_acc_var = fluid.layers.accuracy(
+        input=prediction, label=label, total=batch_size_var)
 
     inference_program = fluid.default_main_program().clone()
     with fluid.program_guard(inference_program):
-        test_target = accuracy.metrics + accuracy.states
-        inference_program = fluid.io.get_inference_program(test_target)
+        inference_program = fluid.io.get_inference_program(
+            target_vars=[batch_acc_var, batch_size_var])
 
     # The training data set.
     train_reader = paddle.batch(
@@ -119,31 +121,37 @@ def main(dict_path):
     exe.run(fluid.default_startup_program())
 
+    train_pass_acc_evaluator = fluid.average.WeightedAverage()
+    test_pass_acc_evaluator = fluid.average.WeightedAverage()
+
     def test(exe):
-        accuracy.reset(exe)
+        test_pass_acc_evaluator.reset()
         for batch_id, data in enumerate(test_reader()):
             input_seq = to_lodtensor(map(lambda x: x[0], data), place)
             y_data = np.array(map(lambda x: x[1], data)).astype("int64")
             y_data = y_data.reshape([-1, 1])
-            acc = exe.run(inference_program,
-                          feed={"words": input_seq,
-                                "label": y_data})
-        test_acc = accuracy.eval(exe)
+            b_acc, b_size = exe.run(inference_program,
+                                    feed={"words": input_seq,
+                                          "label": y_data},
+                                    fetch_list=[batch_acc_var, batch_size_var])
+            test_pass_acc_evaluator.add(value=b_acc, weight=b_size)
+        test_acc = test_pass_acc_evaluator.eval()
         return test_acc
 
     total_time = 0.
     for pass_id in xrange(conf.num_passes):
-        accuracy.reset(exe)
+        train_pass_acc_evaluator.reset()
         start_time = time.time()
         for batch_id, data in enumerate(train_reader()):
-            cost_val, acc_val = exe.run(
+            cost_val, acc_val, size_val = exe.run(
                 fluid.default_main_program(),
                 feed=feeder.feed(data),
-                fetch_list=[avg_cost, accuracy.metrics[0]])
-            pass_acc = accuracy.eval(exe)
+                fetch_list=[avg_cost, batch_acc_var, batch_size_var])
+            train_pass_acc_evaluator.add(value=acc_val, weight=size_val)
             if batch_id and batch_id % conf.log_period == 0:
-                print("Pass id: %d, batch id: %d, cost: %f, pass_acc %f" %
-                      (pass_id, batch_id, cost_val, pass_acc))
+                print("Pass id: %d, batch id: %d, cost: %f, pass_acc: %f" %
+                      (pass_id, batch_id, cost_val,
+                       train_pass_acc_evaluator.eval()))
         end_time = time.time()
         total_time += (end_time - start_time)
         pass_test_acc = test(exe)
```
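For evaluation, the updated examples reuse the same two variables in a pruned test program: the main program is cloned, `fluid.io.get_inference_program` keeps only what is needed to compute the batch-accuracy and batch-size targets, and a second `WeightedAverage` accumulates the test-set accuracy. A sketch of that pattern, again only an illustration assuming the variables from the hunks above (`avg_cost`, `b_acc_var`, `b_size_var`) plus an already built `exe`, `feeder`, and `test_reader`:

```python
import paddle.fluid as fluid

# Assumed from the hunks above: avg_cost, b_acc_var, b_size_var,
# exe (Executor), feeder (DataFeeder), test_reader.
inference_program = fluid.default_main_program().clone()
with fluid.program_guard(inference_program):
    # Prune the cloned program down to the accuracy/size targets.
    inference_program = fluid.io.get_inference_program(
        target_vars=[b_acc_var, b_size_var])

test_pass_acc = fluid.average.WeightedAverage()
test_pass_acc.reset()
for data in test_reader():
    loss, acc, size = exe.run(inference_program,
                              feed=feeder.feed(data),
                              fetch_list=[avg_cost, b_acc_var, b_size_var])
    test_pass_acc.add(value=acc, weight=size)
print("test_acc=" + str(test_pass_acc.eval()))
```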