From b2273c5e6dcf1864e0281f2bb99aa259db3ab0e4 Mon Sep 17 00:00:00 2001
From: zhengya01 <43601548+zhengya01@users.noreply.github.com>
Date: Fri, 15 Mar 2019 20:05:04 +0800
Subject: [PATCH] Ce chinese ner (#1890)

* add chinese_ner ce

* add chinese_ner ce
---
 fluid/PaddleNLP/chinese_ner/.run_ce.sh  | 20 ++++++++
 fluid/PaddleNLP/chinese_ner/__init__.py |  0
 fluid/PaddleNLP/chinese_ner/_ce.py      | 66 +++++++++++++++++++++++++
 fluid/PaddleNLP/chinese_ner/train.py    | 47 +++++++++++++++++-
 4 files changed, 132 insertions(+), 1 deletion(-)
 create mode 100755 fluid/PaddleNLP/chinese_ner/.run_ce.sh
 create mode 100644 fluid/PaddleNLP/chinese_ner/__init__.py
 create mode 100644 fluid/PaddleNLP/chinese_ner/_ce.py

diff --git a/fluid/PaddleNLP/chinese_ner/.run_ce.sh b/fluid/PaddleNLP/chinese_ner/.run_ce.sh
new file mode 100755
index 00000000..79b2da72
--- /dev/null
+++ b/fluid/PaddleNLP/chinese_ner/.run_ce.sh
@@ -0,0 +1,20 @@
+#!/bin/bash
+
+export MKL_NUM_THREADS=1
+export OMP_NUM_THREADS=1
+
+
+cudaid=${chinese_ner:=0} # use 0-th card as default
+export CUDA_VISIBLE_DEVICES=$cudaid
+
+FLAGS_benchmark=true python train.py --num_passes 300 --device GPU --enable_ce | python _ce.py
+
+cudaid=${chinese_ner_4:=0,1,2,3} # use cards 0,1,2,3 as default
+export CUDA_VISIBLE_DEVICES=$cudaid
+
+FLAGS_benchmark=true python train.py --num_passes 300 --device GPU --parallel True --enable_ce | python _ce.py
+
+export CPU_NUM=1
+export NUM_THREADS=1
+
+FLAGS_benchmark=true python train.py --num_passes 300 --device CPU --enable_ce | python _ce.py
diff --git a/fluid/PaddleNLP/chinese_ner/__init__.py b/fluid/PaddleNLP/chinese_ner/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/fluid/PaddleNLP/chinese_ner/_ce.py b/fluid/PaddleNLP/chinese_ner/_ce.py
new file mode 100644
index 00000000..7afa65d3
--- /dev/null
+++ b/fluid/PaddleNLP/chinese_ner/_ce.py
@@ -0,0 +1,66 @@
+# this file is only used for continuous evaluation test!
+
+import os
+import sys
+sys.path.append(os.environ['ceroot'])
+from kpi import CostKpi
+from kpi import DurationKpi
+from kpi import AccKpi
+
+
+each_pass_duration_cpu1_thread1_kpi = DurationKpi('each_pass_duration_cpu1_thread1', 0.08, 0, actived=True)
+train_recall_cpu1_thread1_kpi = AccKpi('train_recall_cpu1_thread1', 0.08, 0)
+each_pass_duration_gpu1_kpi = DurationKpi('each_pass_duration_gpu1', 0.08, 0, actived=True)
+train_recall_gpu1_kpi = AccKpi('train_recall_gpu1', 0.08, 0)
+each_pass_duration_gpu4_kpi = DurationKpi('each_pass_duration_gpu4', 0.08, 0, actived=True)
+train_recall_gpu4_kpi = AccKpi('train_recall_gpu4', 0.08, 0)
+
+tracking_kpis = [
+    each_pass_duration_cpu1_thread1_kpi,
+    train_recall_cpu1_thread1_kpi,
+    each_pass_duration_gpu1_kpi,
+    train_recall_gpu1_kpi,
+    each_pass_duration_gpu4_kpi,
+    train_recall_gpu4_kpi,
+]
+
+
+def parse_log(log):
+    '''
+    This method should be implemented by model developers.
+
+    The suggestion:
+
+    each line in the log should be key, value, for example:
+
+    "
+    train_cost\t1.0
+    test_cost\t1.0
+    train_cost\t1.0
+    train_cost\t1.0
+    train_acc\t1.2
+    "
+    '''
+    for line in log.split('\n'):
+        fs = line.strip().split('\t')
+        print(fs)
+        if len(fs) == 3 and fs[0] == 'kpis':
+            kpi_name = fs[1]
+            kpi_value = float(fs[2])
+            yield kpi_name, kpi_value
+
+
+def log_to_ce(log):
+    kpi_tracker = {}
+    for kpi in tracking_kpis:
+        kpi_tracker[kpi.name] = kpi
+
+    for (kpi_name, kpi_value) in parse_log(log):
+        print(kpi_name, kpi_value)
+        kpi_tracker[kpi_name].add_record(kpi_value)
+        kpi_tracker[kpi_name].persist()
+
+
+if __name__ == '__main__':
+    log = sys.stdin.read()
+    log_to_ce(log)
diff --git a/fluid/PaddleNLP/chinese_ner/train.py b/fluid/PaddleNLP/chinese_ner/train.py
index fc65528c..4a6a61b3 100644
--- a/fluid/PaddleNLP/chinese_ner/train.py
+++ b/fluid/PaddleNLP/chinese_ner/train.py
@@ -59,6 +59,10 @@ def parse_args():
         type=int,
         default=1000,
         help='The number of epochs. (default: %(default)d)')
+    parser.add_argument(
+        '--enable_ce',
+        action='store_true',
+        help='If set, run the task with continuous evaluation logs.')
     args = parser.parse_args()
     return args
 
@@ -265,6 +269,10 @@
 
     main = fluid.Program()
     startup = fluid.Program()
+    if args.enable_ce:
+        SEED = 102
+        main.random_seed = SEED
+        startup.random_seed = SEED
     with fluid.program_guard(main, startup):
         avg_cost, feature_out, word, mention, target = ner_net(
             args.word_dict_len, args.label_dict_len)
@@ -313,6 +321,8 @@
         train_exe = exe
         test_exe = exe
 
+    total_time = 0
+    ce_info = []
     batch_id = 0
     for pass_id in range(args.num_passes):
         chunk_evaluator.reset()
@@ -336,11 +346,13 @@
             except StopIteration:
                 break
         end_time = time.time()
+        total_time += end_time - start_time
         print("pass_id:" + str(pass_id) + ", time_cost:" + str(
             end_time - start_time) + "s")
         precision, recall, f1_score = chunk_evaluator.eval()
         print("[Train] precision:" + str(precision) + ", recall:" + str(
             recall) + ", f1:" + str(f1_score))
+        ce_info.append(recall)
         p, r, f1 = test2(
             exe, chunk_evaluator, inference_program, test_reader, place,
             [num_infer_chunks, num_label_chunks, num_correct_chunks])
@@ -350,7 +362,40 @@
                 "params_pass_%d" % pass_id)
             fluid.io.save_inference_model(save_dirname, ['word', 'mention'],
                                           [crf_decode], exe)
-
+    # only for ce
+    if args.enable_ce:
+        ce_recall = 0
+        try:
+            ce_recall = ce_info[-2]
+        except IndexError:
+            print("ce info error")
+        epoch_idx = args.num_passes
+        device = get_device(args)
+        if args.device == "GPU":
+            gpu_num = device[1]
+            print("kpis\teach_pass_duration_gpu%s\t%s" %
+                  (gpu_num, total_time / epoch_idx))
+            print("kpis\ttrain_recall_gpu%s\t%s" %
+                  (gpu_num, ce_recall))
+        else:
+            cpu_num = device[1]
+            threads_num = device[2]
+            print("kpis\teach_pass_duration_cpu%s_thread%s\t%s" %
+                  (cpu_num, threads_num, total_time / epoch_idx))
+            print("kpis\ttrain_recall_cpu%s_thread%s\t%s" %
+                  (cpu_num, threads_num, ce_recall))
+
+
+def get_device(args):
+    if args.device == "GPU":
+        gpus = os.environ.get("CUDA_VISIBLE_DEVICES", "")
+        gpu_num = len(gpus.split(','))
+        return "gpu", gpu_num
+    else:
+        threads_num = os.environ.get('NUM_THREADS', 1)
+        cpu_num = os.environ.get('CPU_NUM', 1)
+        return "cpu", int(cpu_num), int(threads_num)
+
 
 if __name__ == "__main__":
     args = parse_args()
-- 
GitLab