Commit 39e5823e authored by ccmeteorljh

add time statistics for some models

Parent d2243816
@@ -11,7 +11,7 @@ import argparse
 from reader import CityscapeDataset
 import reader
 import models
+import time

 def add_argument(name, type, default, help):
     parser.add_argument('--' + name, default=default, type=type, help=help)
@@ -141,6 +141,7 @@ if args.parallel:
 batches = dataset.get_batch_generator(batch_size, total_step)

 for i, imgs, labels, names in batches:
+    prev_start_time = time.time()
     if args.parallel:
         retv = exe_p.run(fetch_list=[pred.name, loss_mean.name],
                          feed={'img': imgs,
@@ -150,10 +151,12 @@ for i, imgs, labels, names in batches:
                        feed={'img': imgs,
                              'label': labels},
                        fetch_list=[pred, loss_mean])
+    end_time = time.time()
     if i % 100 == 0:
         print("Model is saved to", args.save_weights_path)
         save_model()
-    print("step %s, loss: %s" % (i, np.mean(retv[1])))
+    print("step {:d}, loss: {:.6f}, step_time_cost: {:.3f}" .format(i,
+        np.mean(retv[1]), end_time - prev_start_time))

 print("Training done. Model is saved to", args.save_weights_path)
 save_model()
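For reference, the per-step timing added above is a plain wall-clock measurement: take one timestamp before the training step, another right after it, and report the difference next to the loss. A minimal standalone sketch of the same idea (batches and run_one_step are placeholders, not names from this repository):

    import time
    import numpy as np

    def train(batches, run_one_step):
        # run_one_step stands in for the framework call (e.g. an executor run)
        for i, batch in enumerate(batches):
            prev_start_time = time.time()   # stamp just before the step
            loss = run_one_step(batch)      # forward/backward + parameter update
            end_time = time.time()          # stamp right after the step
            print("step {:d}, loss: {:.6f}, step_time_cost: {:.3f}".format(
                i, float(np.mean(loss)), end_time - prev_start_time))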
@@ -73,8 +73,8 @@ def train(train_reader,
         avg_cost = total_cost / data_count
         avg_acc = total_acc / data_count
-        print("pass_id: %d, avg_acc: %f, avg_cost: %f" %
-              (pass_id, avg_acc, avg_cost))
+        print("pass_id: %d, avg_acc: %f, avg_cost: %f, pass_time_cost: %f" %
+              (pass_id, avg_acc, avg_cost, time.time() - pass_start))

         epoch_model = save_dirname + "/" + "epoch" + str(pass_id)
         fluid.io.save_inference_model(epoch_model, ["words", "label"], acc, exe)
......
@@ -171,6 +171,7 @@ def train_and_evaluate(train_reader,
     for epoch_id in range(global_config.epoch_num):
         data_size, data_count, total_acc, total_cost = 0, 0, 0.0, 0.0
         batch_id = 0
+        epoch_begin_time = time.time()
         for data in train_reader():
             avg_cost_np, avg_acc_np = exe.run(fluid.default_main_program(),
                                               feed=feeder.feed(data),
@@ -192,8 +193,10 @@ def train_and_evaluate(train_reader,
         avg_acc = total_acc / data_count
         print("")
-        print("[%s] epoch_id: %d, train_avg_cost: %f, train_avg_acc: %f" % (
-            time.asctime( time.localtime(time.time()) ), epoch_id, avg_cost, avg_acc))
+        print("[%s] epoch_id: %d, train_avg_cost: %f, train_avg_acc: %f, epoch_time_cost: %f" % (
+            time.asctime(time.localtime(time.time())),
+            epoch_id, avg_cost, avg_acc,
+            time.time() - epoch_begin_time))

         epoch_model = global_config.save_dirname + "/" + "epoch" + str(epoch_id)
         fluid.io.save_inference_model(epoch_model, ["question1", "question2", "label"], acc, exe)
......
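The epoch-level variant above is the same measurement taken once per epoch, with a human-readable timestamp prepended to the log line. A minimal sketch, assuming a hypothetical run_one_epoch() that returns the averaged cost and accuracy:

    import time

    def train_epochs(epoch_num, run_one_epoch):
        for epoch_id in range(epoch_num):
            epoch_begin_time = time.time()
            avg_cost, avg_acc = run_one_epoch()   # one full pass over the training data
            print("[%s] epoch_id: %d, train_avg_cost: %f, train_avg_acc: %f, epoch_time_cost: %f" % (
                time.asctime(time.localtime(time.time())),   # wall-clock timestamp for the log
                epoch_id, avg_cost, avg_acc,
                time.time() - epoch_begin_time))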
@@ -3,6 +3,7 @@ from __future__ import print_function
 import argparse
 import logging
 import os
+import time

 # disable gpu training for this example
 os.environ["CUDA_VISIBLE_DEVICES"] = ""
@@ -122,6 +123,7 @@ def train_loop(args, train_program, data_list, loss, auc_var, batch_auc_var,
     exe = fluid.Executor(place)
     exe.run(fluid.default_startup_program())
     for pass_id in range(args.num_passes):
+        pass_start = time.time()
         for batch_id, data in enumerate(train_reader()):
             loss_val, auc_val, batch_auc_val = exe.run(
                 train_program,
@@ -134,6 +136,7 @@ def train_loop(args, train_program, data_list, loss, auc_var, batch_auc_var,
                 model_dir = args.model_output_dir + '/batch-' + str(batch_id)
                 if args.trainer_id == 0:
                     fluid.io.save_inference_model(model_dir, data_name_list, [loss, auc_var], exe)
+        print("pass_id: %d, pass_time_cost: %f" % (pass_id, time.time() - pass_start))
         model_dir = args.model_output_dir + '/pass-' + str(pass_id)
         if args.trainer_id == 0:
             fluid.io.save_inference_model(model_dir, data_name_list, [loss, auc_var], exe)
......
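All four changes repeat the same start/stop bookkeeping by hand. As an illustration only (not part of this commit), the pattern could be factored into a small reusable helper:

    import time
    from contextlib import contextmanager

    @contextmanager
    def timed(label):
        # Prints the wall-clock duration of the enclosed block.
        start = time.time()
        yield
        print("%s: %f" % (label, time.time() - start))

    # Usage, equivalent to the per-pass timing above:
    # with timed("pass_time_cost"):
    #     run_one_pass()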