Commit e1c0f28a authored by u010070587, committed by ruri

finetune ce and refine gnn (#4089)

Parent 810e4023
@@ -489,7 +489,7 @@ def print_ce(device_num, metrics, time_info):
     print("kpis\ttrain_cost_card{}\t{}".format(device_num, train_loss))
     print("kpis\ttrain_acc1_card{}\t{}".format(device_num, train_acc1))
     print("kpis\ttrain_acc5_card{}\t{}".format(device_num, train_acc5))
-    print("kpis\ttest_loss_card{}\t{}".format(device_num, test_loss))
+    print("kpis\ttest_cost_card{}\t{}".format(device_num, test_loss))
     print("kpis\ttest_acc1_card{}\t{}".format(device_num, test_acc1))
     print("kpis\ttest_acc5_card{}\t{}".format(device_num, test_acc5))
     print("kpis\ttrain_speed_card{}\t{}".format(device_num, train_speed))
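Note on the rename above: CE trackers typically key on the exact kpi name, so a line printed as test_loss_card1 would never match a tracker configured for test_cost_card1. The sketch below is hypothetical (the actual CE parser is not part of this commit); it only illustrates consuming the tab-separated "kpis\t<name>\t<value>" shape these prints emit.

# Hypothetical sketch of a CE harness reading the kpi lines above.
# Names and structure here are assumptions, not repo code.
def parse_kpis(log_text):
    """Collect {kpi_name: value} from lines shaped 'kpis\t<name>\t<value>'."""
    kpis = {}
    for line in log_text.splitlines():
        parts = line.strip().split("\t")
        if len(parts) == 3 and parts[0] == "kpis":
            _, name, value = parts
            kpis[name] = float(value)
    return kpis

# A tracker keyed on "test_cost_card1" silently misses the value when the
# script prints "test_loss_card1" instead -- the mismatch this hunk fixes.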
@@ -274,8 +274,8 @@ def train_async(args):
     # This is for continuous evaluation only
     if args.enable_ce:
         # Use the mean cost/acc for training
-        print("kpis train_cost %s" % (avg_loss))
-        print("kpis test_recall %s" % (recall))
+        print("kpis\ttrain_cost\t{}".format(avg_loss))
+        print("kpis\ttest_recall\t{}".format(recall))


 def initlogging():
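The hunk above also switches from space-separated %-formatting to the tab-separated .format style used elsewhere, so a consumer that splits kpi lines on "\t" sees three fields instead of one. A quick standalone demonstration (dummy values, not from the commit):

avg_loss, recall = 0.1234, 0.9876  # dummy values for demonstration

old_line = "kpis train_cost %s" % (avg_loss)        # space-separated
new_line = "kpis\ttrain_cost\t{}".format(avg_loss)  # tab-separated

print(old_line.split("\t"))  # ['kpis train_cost 0.1234'] -- one field
print(new_line.split("\t"))  # ['kpis', 'train_cost', '0.1234'] -- three fields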
@@ -101,10 +101,8 @@ def train():
     feed_list = [e.name for e in feed_datas]
     if use_parallel:
-        exec_strategy = fluid.ExecutionStrategy()
-        exec_strategy.num_threads = 1 if os.name == 'nt' else 0
         train_exe = fluid.ParallelExecutor(
-            use_cuda=use_cuda, loss_name=loss.name, exec_strategy=exec_strategy)
+            use_cuda=use_cuda, loss_name=loss.name)
     else:
         train_exe = exe
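The GNN hunk above drops the hand-rolled ExecutionStrategy and lets fluid.ParallelExecutor fall back to its defaults; the removed code only existed to pin Windows ('nt') to a single thread. A hypothetical helper (the name build_parallel_exe and the force_thread_tuning flag are assumptions, not repo code) showing both behaviours under the PaddlePaddle 1.x fluid API:

import os
import paddle.fluid as fluid

def build_parallel_exe(use_cuda, loss, force_thread_tuning=False):
    """Hypothetical helper: reproduce the pre-change behaviour on demand.

    With force_thread_tuning=False this matches the post-change call,
    letting fluid pick its ExecutionStrategy defaults.
    """
    if force_thread_tuning:
        exec_strategy = fluid.ExecutionStrategy()
        # The removed code pinned Windows ('nt') to one thread and let
        # fluid choose (0) on other platforms.
        exec_strategy.num_threads = 1 if os.name == 'nt' else 0
        return fluid.ParallelExecutor(
            use_cuda=use_cuda, loss_name=loss.name,
            exec_strategy=exec_strategy)
    return fluid.ParallelExecutor(use_cuda=use_cuda, loss_name=loss.name)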