提交 b2273c5e 编写于 作者: Z zhengya01 提交者: Yibing Liu

Ce chinese ner (#1890)

* add chinese_ner ce

* add chinese_ner ce
上级 b9c32c77
#!/bin/bash
# Continuous-evaluation (CE) driver for the chinese_ner model.
# Runs train.py under three configurations -- single GPU, 4 GPUs
# (--parallel), and single-thread CPU -- and pipes each training log
# into _ce.py, which extracts and persists the "kpis\t<name>\t<value>"
# records printed by train.py's --enable_ce branch.
export MKL_NUM_THREADS=1
export OMP_NUM_THREADS=1
cudaid=${chinese_ner:=0} # use 0-th card as default
export CUDA_VISIBLE_DEVICES=$cudaid
FLAGS_benchmark=true python train.py --num_passes 300 --device GPU --enable_ce | python _ce.py
cudaid=${chinese_ner_4:=0,1,2,3} # use cards 0-3 as default for the 4-GPU run
export CUDA_VISIBLE_DEVICES=$cudaid
FLAGS_benchmark=true python train.py --num_passes 300 --device GPU --parallel True --enable_ce | python _ce.py
export CPU_NUM=1
export NUM_THREADS=1
FLAGS_benchmark=true python train.py --num_passes 300 --device CPU --enable_ce | python _ce.py
# this file is only used for continuous evaluation test!
import os
import sys
sys.path.append(os.environ['ceroot'])
from kpi import CostKpi
from kpi import DurationKpi
from kpi import AccKpi
# KPI trackers, one duration + one recall metric per hardware config
# (1-thread CPU, 1 GPU, 4 GPUs).  The names must match the
# "kpis\t<name>\t<value>" lines that train.py prints under --enable_ce.
# NOTE(review): positional args are presumably (name, diff_threshold,
# skip_head) with 0.08 meaning an 8% tolerance, and actived=True making
# the duration KPIs gate the CE run -- confirm against the kpi library.
each_pass_duration_cpu1_thread1_kpi = DurationKpi('each_pass_duration_cpu1_thread1', 0.08, 0, actived=True)
train_recall_cpu1_thread1_kpi = AccKpi('train_recall_cpu1_thread1', 0.08, 0)
each_pass_duration_gpu1_kpi = DurationKpi('each_pass_duration_gpu1', 0.08, 0, actived=True)
train_recall_gpu1_kpi = AccKpi('train_recall_gpu1', 0.08, 0)
each_pass_duration_gpu4_kpi = DurationKpi('each_pass_duration_gpu4', 0.08, 0, actived=True)
train_recall_gpu4_kpi = AccKpi('train_recall_gpu4', 0.08, 0)
# All trackers that log_to_ce() will look up by name.
tracking_kpis = [
    each_pass_duration_cpu1_thread1_kpi,
    train_recall_cpu1_thread1_kpi,
    each_pass_duration_gpu1_kpi,
    train_recall_gpu1_kpi,
    each_pass_duration_gpu4_kpi,
    train_recall_gpu4_kpi,
]
def parse_log(log):
    """Yield ``(kpi_name, kpi_value)`` pairs extracted from a training log.

    Only lines of the exact form ``kpis<TAB><name><TAB><value>`` are
    treated as KPI records; every other line is ignored.  The value
    field is converted to ``float``.
    """
    for raw_line in log.split('\n'):
        fields = raw_line.strip().split('\t')
        print(fields)  # echo every parsed line for CE debugging
        if len(fields) != 3 or fields[0] != 'kpis':
            continue
        yield fields[1], float(fields[2])
def log_to_ce(log):
    """Parse *log* and persist every recognised KPI record.

    Each ``kpis`` line found by :func:`parse_log` is added to the
    matching tracker from ``tracking_kpis`` (looked up by name) and
    persisted immediately.
    """
    tracker_by_name = {kpi.name: kpi for kpi in tracking_kpis}
    for name, value in parse_log(log):
        print(name, value)
        tracker_by_name[name].add_record(value)
        tracker_by_name[name].persist()
if __name__ == '__main__':
    # Read the complete training log from stdin (piped in by .run_ce.sh)
    # and persist every KPI record it contains.
    log = sys.stdin.read()
    log_to_ce(log)
...@@ -59,6 +59,10 @@ def parse_args(): ...@@ -59,6 +59,10 @@ def parse_args():
type=int, type=int,
default=1000, default=1000,
help='The number of epochs. (default: %(default)d)') help='The number of epochs. (default: %(default)d)')
parser.add_argument(
'--enable_ce',
action='store_true',
help='If set, run the task with continuous evaluation logs.')
args = parser.parse_args() args = parser.parse_args()
return args return args
...@@ -265,6 +269,10 @@ def main(args): ...@@ -265,6 +269,10 @@ def main(args):
main = fluid.Program() main = fluid.Program()
startup = fluid.Program() startup = fluid.Program()
if args.enable_ce:
SEED = 102
main.random_seed = SEED
startup.random_seed = SEED
with fluid.program_guard(main, startup): with fluid.program_guard(main, startup):
avg_cost, feature_out, word, mention, target = ner_net( avg_cost, feature_out, word, mention, target = ner_net(
args.word_dict_len, args.label_dict_len) args.word_dict_len, args.label_dict_len)
...@@ -313,6 +321,8 @@ def main(args): ...@@ -313,6 +321,8 @@ def main(args):
train_exe = exe train_exe = exe
test_exe = exe test_exe = exe
total_time = 0
ce_info = []
batch_id = 0 batch_id = 0
for pass_id in range(args.num_passes): for pass_id in range(args.num_passes):
chunk_evaluator.reset() chunk_evaluator.reset()
...@@ -336,11 +346,13 @@ def main(args): ...@@ -336,11 +346,13 @@ def main(args):
except StopIteration: except StopIteration:
break break
end_time = time.time() end_time = time.time()
total_time += end_time - start_time
print("pass_id:" + str(pass_id) + ", time_cost:" + str( print("pass_id:" + str(pass_id) + ", time_cost:" + str(
end_time - start_time) + "s") end_time - start_time) + "s")
precision, recall, f1_score = chunk_evaluator.eval() precision, recall, f1_score = chunk_evaluator.eval()
print("[Train] precision:" + str(precision) + ", recall:" + str( print("[Train] precision:" + str(precision) + ", recall:" + str(
recall) + ", f1:" + str(f1_score)) recall) + ", f1:" + str(f1_score))
ce_info.append(recall)
p, r, f1 = test2( p, r, f1 = test2(
exe, chunk_evaluator, inference_program, test_reader, place, exe, chunk_evaluator, inference_program, test_reader, place,
[num_infer_chunks, num_label_chunks, num_correct_chunks]) [num_infer_chunks, num_label_chunks, num_correct_chunks])
...@@ -350,7 +362,40 @@ def main(args): ...@@ -350,7 +362,40 @@ def main(args):
"params_pass_%d" % pass_id) "params_pass_%d" % pass_id)
fluid.io.save_inference_model(save_dirname, ['word', 'mention'], fluid.io.save_inference_model(save_dirname, ['word', 'mention'],
[crf_decode], exe) [crf_decode], exe)
# only for ce
if args.enable_ce:
ce_recall = 0
try:
ce_recall = ce_info[-2]
except:
print("ce info error")
epoch_idx = args.num_passes
device = get_device(args)
if args.device == "GPU":
gpu_num = device[1]
print("kpis\teach_pass_duration_gpu%s\t%s" %
(gpu_num, total_time / epoch_idx))
print("kpis\ttrain_recall_gpu%s\t%s" %
(gpu_num, ce_recall))
else:
cpu_num = device[1]
threads_num = device[2]
print("kpis\teach_pass_duration_cpu%s_thread%s\t%s" %
(cpu_num, threads_num, total_time / epoch_idx))
print("kpis\ttrain_recall_cpu%s_thread%s\t%s" %
(cpu_num, threads_num, ce_recall))
def get_device(args):
    """Describe the compute resources configured via environment variables.

    Returns ``("gpu", num_gpus)`` when ``args.device == "GPU"`` (counted
    from CUDA_VISIBLE_DEVICES), otherwise ``("cpu", num_cpus,
    num_threads)`` taken from CPU_NUM and NUM_THREADS (both default 1).
    """
    if args.device != "GPU":
        cpu_num = int(os.environ.get('CPU_NUM', 1))
        threads_num = int(os.environ.get('NUM_THREADS', 1))
        return "cpu", cpu_num, threads_num
    visible = os.environ.get("CUDA_VISIBLE_DEVICES", "")
    # NOTE: an empty/unset variable still counts as one device, since
    # "".split(',') == [''] -- matches the original behavior.
    return "gpu", len(visible.split(','))
if __name__ == "__main__": if __name__ == "__main__":
args = parse_args() args = parse_args()
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册