diff --git a/fluid/PaddleRec/ssr/.run_ce.sh b/fluid/PaddleRec/ssr/.run_ce.sh
new file mode 100755
index 0000000000000000000000000000000000000000..ffcc0f6ac0a400641fe42cd2f6d62d8488115432
--- /dev/null
+++ b/fluid/PaddleRec/ssr/.run_ce.sh
@@ -0,0 +1,22 @@
+#!/bin/bash
+
+export MKL_NUM_THREADS=1
+export OMP_NUM_THREADS=1
+
+
+export CPU_NUM=1
+export NUM_THREADS=1
+
+FLAGS_benchmark=true python train.py --train_dir train_big_data --vocab_path vocab_big.txt --use_cuda 0 --batch_size 500 --model_dir model_output --epochs 2 --enable_ce --step_num 500 | python _ce.py
+
+
+cudaid=${ssr:=0} # use 0-th card as default
+export CUDA_VISIBLE_DEVICES=$cudaid
+
+FLAGS_benchmark=true python train.py --train_dir train_big_data --vocab_path vocab_big.txt --use_cuda 1 --batch_size 500 --model_dir model_output --epochs 2 --enable_ce --step_num 1000 | python _ce.py
+
+
+cudaid=${ssr_4:=0,1,2,3} # use 0-th card as default
+export CUDA_VISIBLE_DEVICES=$cudaid
+
+FLAGS_benchmark=true python train.py --train_dir train_big_data --vocab_path vocab_big.txt --use_cuda 1 --parallel 1 --num_devices 2 --batch_size 500 --model_dir model_output --epochs 2 --enable_ce --step_num 1000 | python _ce.py
diff --git a/fluid/PaddleRec/ssr/__init__.py b/fluid/PaddleRec/ssr/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/fluid/PaddleRec/ssr/_ce.py b/fluid/PaddleRec/ssr/_ce.py
new file mode 100644
index 0000000000000000000000000000000000000000..e5a3d3b19339b39e638d4e708878a68778fb3fd9
--- /dev/null
+++ b/fluid/PaddleRec/ssr/_ce.py
@@ -0,0 +1,66 @@
+# this file is only used for continuous evaluation test!
+
+import os
+import sys
+sys.path.append(os.environ['ceroot'])
+from kpi import CostKpi
+from kpi import DurationKpi
+from kpi import AccKpi
+
+
+each_pass_duration_cpu1_thread1_kpi = DurationKpi('each_pass_duration_cpu1_thread1', 0.08, 0, actived=True)
+train_acc_cpu1_thread1_kpi = CostKpi('train_acc_cpu1_thread1', 0.08, 0)
+each_pass_duration_gpu1_kpi = DurationKpi('each_pass_duration_gpu1', 0.08, 0, actived=True)
+train_acc_gpu1_kpi = CostKpi('train_acc_gpu1', 0.08, 0)
+each_pass_duration_gpu4_kpi = DurationKpi('each_pass_duration_gpu4', 0.08, 0, actived=True)
+train_acc_gpu4_kpi = CostKpi('train_acc_gpu4', 0.08, 0)
+
+tracking_kpis = [
+    each_pass_duration_cpu1_thread1_kpi,
+    train_acc_cpu1_thread1_kpi,
+    each_pass_duration_gpu1_kpi,
+    train_acc_gpu1_kpi,
+    each_pass_duration_gpu4_kpi,
+    train_acc_gpu4_kpi,
+    ]
+
+
+def parse_log(log):
+    '''
+    This method should be implemented by model developers.
+
+    The suggestion:
+
+    each line in the log should be key, value, for example:
+
+    "
+    train_cost\t1.0
+    test_cost\t1.0
+    train_cost\t1.0
+    train_cost\t1.0
+    train_acc\t1.2
+    "
+    '''
+    for line in log.split('\n'):
+        fs = line.strip().split('\t')
+        print(fs)
+        if len(fs) == 3 and fs[0] == 'kpis':
+            kpi_name = fs[1]
+            kpi_value = float(fs[2])
+            yield kpi_name, kpi_value
+
+
+def log_to_ce(log):
+    kpi_tracker = {}
+    for kpi in tracking_kpis:
+        kpi_tracker[kpi.name] = kpi
+
+    for (kpi_name, kpi_value) in parse_log(log):
+        print(kpi_name, kpi_value)
+        kpi_tracker[kpi_name].add_record(kpi_value)
+        kpi_tracker[kpi_name].persist()
+
+
+if __name__ == '__main__':
+    log = sys.stdin.read()
+    log_to_ce(log)
diff --git a/fluid/PaddleRec/ssr/train.py b/fluid/PaddleRec/ssr/train.py
index 8ca5e8ee3b9d79af52a9e75c7540f1e104750b96..1c0c9f8cc3ed6750d21ba43985fb142dc527cf00 100644
--- a/fluid/PaddleRec/ssr/train.py
+++ b/fluid/PaddleRec/ssr/train.py
@@ -53,6 +53,12 @@ def parse_args():
         "--embedding_dim", type=int, default=128, help="embedding dim")
     parser.add_argument(
         '--num_devices', type=int, default=1, help='Number of GPU devices')
+    parser.add_argument(
+        '--step_num', type=int, default=1000, help='Number of steps')
+    parser.add_argument(
+        '--enable_ce',
+        action='store_true',
+        help='If set, run the task with continuous evaluation logs.')
     return parser.parse_args()
 
 
@@ -61,6 +67,10 @@ def get_cards(args):
 
 
 def train(args):
+    if args.enable_ce:
+        SEED = 102
+        fluid.default_startup_program().random_seed = SEED
+        fluid.default_main_program().random_seed = SEED
     use_cuda = True if args.use_cuda else False
     parallel = True if args.parallel else False
     print("use_cuda:", use_cuda, "parallel:", parallel)
@@ -87,6 +97,7 @@ def train(args):
         train_exe = exe
 
     total_time = 0.0
+    ce_info = []
     for pass_id in range(args.epochs):
         epoch_idx = pass_id + 1
         print("epoch_%d start" % epoch_idx)
@@ -96,12 +107,15 @@
             i += 1
             loss_val, correct_val = train_exe.run(
                 feed=feeder.feed(data), fetch_list=[avg_cost.name, acc.name])
+            ce_info.append(float(np.mean(correct_val)) / args.batch_size)
             if i % args.print_batch == 0:
                 logger.info(
                     "Train --> pass: {} batch_id: {} avg_cost: {}, acc: {}".
                     format(pass_id, batch_id, np.mean(loss_val),
                            float(np.mean(correct_val)) / args.batch_size))
+            if args.enable_ce and i > args.step_num:
+                break
 
         t1 = time.time()
         total_time += t1 - t0
         print("epoch:%d num_steps:%d time_cost(s):%f" %
@@ -110,6 +124,40 @@ def train(args):
         fluid.io.save_params(executor=exe, dirname=save_dir)
         print("model saved in %s" % save_dir)
 
+    # only for ce
+    if args.enable_ce:
+        ce_acc = 0
+        try:
+            ce_acc = ce_info[-2]
+        except:
+            print("ce info error")
+        epoch_idx = args.epochs
+        device = get_device(args)
+        if args.use_cuda:
+            gpu_num = device[1]
+            print("kpis\teach_pass_duration_gpu%s\t%s" %
+                  (gpu_num, total_time / epoch_idx))
+            print("kpis\ttrain_acc_gpu%s\t%s" %
+                  (gpu_num, ce_acc))
+        else:
+            cpu_num = device[1]
+            threads_num = device[2]
+            print("kpis\teach_pass_duration_cpu%s_thread%s\t%s" %
+                  (cpu_num, threads_num, total_time / epoch_idx))
+            print("kpis\ttrain_acc_cpu%s_thread%s\t%s" %
+                  (cpu_num, threads_num, ce_acc))
+
+
+def get_device(args):
+    if args.use_cuda:
+        gpus = os.environ.get("CUDA_VISIBLE_DEVICES", 1)
+        gpu_num = len(gpus.split(','))
+        return "gpu", gpu_num
+    else:
+        threads_num = os.environ.get('NUM_THREADS', 1)
+        cpu_num = os.environ.get('CPU_NUM', 1)
+        return "cpu", int(cpu_num), int(threads_num)
+
 
 def main():
     args = parse_args()
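
The sketch below is not part of the patch; it only illustrates the log protocol the two new files share. With --enable_ce, train.py prints tab-separated lines of the form "kpis\t<kpi_name>\t<value>" (the names match the tracking_kpis entries in _ce.py), .run_ce.sh pipes that stdout into _ce.py, and parse_log keeps exactly the three-field lines whose first field is "kpis". Sample values here are made up.

# Standalone sketch of the kpi log protocol (illustration only, sample values assumed).
sample_log = ("epoch:2 num_steps:1000 time_cost(s):12.5\n"   # ignored: not a kpi line
              "kpis\teach_pass_duration_gpu1\t12.5\n"
              "kpis\ttrain_acc_gpu1\t0.0016\n")


def parse_log(log):
    # Same filtering rule as _ce.py: keep only "kpis\t<name>\t<value>" lines.
    for line in log.split('\n'):
        fs = line.strip().split('\t')
        if len(fs) == 3 and fs[0] == 'kpis':
            yield fs[1], float(fs[2])


print(list(parse_log(sample_log)))
# [('each_pass_duration_gpu1', 12.5), ('train_acc_gpu1', 0.0016)]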