diff --git a/benchmark/analysis.py b/benchmark/analysis.py
index c4189b99d8ee082082a254718617a7e58bebe961..7322f00ace94ff25e8aba38106471d32a5e8223d 100644
--- a/benchmark/analysis.py
+++ b/benchmark/analysis.py
@@ -26,35 +26,57 @@ def parse_args():
     parser.add_argument(
         "--filename", type=str, help="The name of log which need to analysis.")
     parser.add_argument(
-        "--log_with_profiler", type=str, help="The path of train log with profiler")
+        "--log_with_profiler",
+        type=str,
+        help="The path of train log with profiler")
     parser.add_argument(
         "--profiler_path", type=str, help="The path of profiler timeline log.")
     parser.add_argument(
         "--keyword", type=str, help="Keyword to specify analysis data")
     parser.add_argument(
-        "--separator", type=str, default=None, help="Separator of different field in log")
+        "--separator",
+        type=str,
+        default=None,
+        help="Separator of different field in log")
     parser.add_argument(
         '--position', type=int, default=None, help='The position of data field')
     parser.add_argument(
-        '--range', type=str, default="", help='The range of data field to intercept')
+        '--range',
+        type=str,
+        default="",
+        help='The range of data field to intercept')
     parser.add_argument(
         '--base_batch_size', type=int, help='base_batch size on gpu')
     parser.add_argument(
-        '--skip_steps', type=int, default=0, help='The number of steps to be skipped')
+        '--skip_steps',
+        type=int,
+        default=0,
+        help='The number of steps to be skipped')
     parser.add_argument(
-        '--model_mode', type=int, default=-1, help='Analysis mode, default value is -1')
+        '--model_mode',
+        type=int,
+        default=-1,
+        help='Analysis mode, default value is -1')
+    parser.add_argument('--ips_unit', type=str, default=None, help='IPS unit')
     parser.add_argument(
-        '--ips_unit', type=str, default=None, help='IPS unit')
-    parser.add_argument(
-        '--model_name', type=str, default=0, help='training model_name, transformer_base')
+        '--model_name',
+        type=str,
+        default=0,
+        help='training model_name, transformer_base')
     parser.add_argument(
         '--mission_name', type=str, default=0, help='training mission name')
     parser.add_argument(
         '--direction_id', type=int, default=0, help='training direction_id')
     parser.add_argument(
-        '--run_mode', type=str, default="sp", help='multi process or single process')
+        '--run_mode',
+        type=str,
+        default="sp",
+        help='multi process or single process')
     parser.add_argument(
-        '--index', type=int, default=1, help='{1: speed, 2:mem, 3:profiler, 6:max_batch_size}')
+        '--index',
+        type=int,
+        default=1,
+        help='{1: speed, 2:mem, 3:profiler, 6:max_batch_size}')
     parser.add_argument(
         '--gpu_num', type=int, default=1, help='nums of training gpus')
     args = parser.parse_args()
@@ -72,7 +94,12 @@ def _is_number(num):


 class TimeAnalyzer(object):
-    def __init__(self, filename, keyword=None, separator=None, position=None, range="-1"):
+    def __init__(self,
+                 filename,
+                 keyword=None,
+                 separator=None,
+                 position=None,
+                 range="-1"):
         if filename is None:
             raise Exception("Please specify the filename!")

@@ -99,7 +126,8 @@ class TimeAnalyzer(object):

                     # Distil the string from a line.
                     line = line.strip()
-                    line_words = line.split(self.separator) if self.separator else line.split()
+                    line_words = line.split(
+                        self.separator) if self.separator else line.split()
                     if args.position:
                         result = line_words[self.position]
                     else:
@@ -108,27 +136,36 @@ class TimeAnalyzer(object):
                             if line_words[i] == self.keyword:
                                 result = line_words[i + 1]
                                 break
-
+
                     # Distil the result from the picked string.
                     if not self.range:
                         result = result[0:]
                     elif _is_number(self.range):
-                        result = result[0: int(self.range)]
+                        result = result[0:int(self.range)]
                     else:
-                        result = result[int(self.range.split(":")[0]): int(self.range.split(":")[1])]
+                        result = result[int(self.range.split(":")[0]):int(
+                            self.range.split(":")[1])]
                     self.records.append(float(result))
                 except Exception as exc:
-                    print("line is: {}; separator={}; position={}".format(line, self.separator, self.position))
+                    print("line is: {}; separator={}; position={}".format(
+                        line, self.separator, self.position))

-        print("Extract {} records: separator={}; position={}".format(len(self.records), self.separator, self.position))
+        print("Extract {} records: separator={}; position={}".format(
+            len(self.records), self.separator, self.position))

-    def _get_fps(self, mode, batch_size, gpu_num, avg_of_records, run_mode, unit=None):
+    def _get_fps(self,
+                 mode,
+                 batch_size,
+                 gpu_num,
+                 avg_of_records,
+                 run_mode,
+                 unit=None):
         if mode == -1 and run_mode == 'sp':
             assert unit, "Please set the unit when mode is -1."
             fps = gpu_num * avg_of_records
         elif mode == -1 and run_mode == 'mp':
             assert unit, "Please set the unit when mode is -1."
-            fps = gpu_num * avg_of_records #temporarily, not used now
+            fps = gpu_num * avg_of_records  #temporarily, not used now
             print("------------this is mp")
         elif mode == 0:  # s/step -> samples/s
@@ -155,12 +192,20 @@ class TimeAnalyzer(object):
         return fps, unit

-    def analysis(self, batch_size, gpu_num=1, skip_steps=0, mode=-1, run_mode='sp', unit=None):
+    def analysis(self,
+                 batch_size,
+                 gpu_num=1,
+                 skip_steps=0,
+                 mode=-1,
+                 run_mode='sp',
+                 unit=None):
         if batch_size <= 0:
             print("base_batch_size should larger than 0.")
             return 0, ''

-        if len(self.records) <= skip_steps: # to address the condition which item of log equals to skip_steps
+        if len(
+                self.records
+        ) <= skip_steps:  # to address the condition which item of log equals to skip_steps
             print("no records")
             return 0, ''

@@ -180,16 +225,20 @@ class TimeAnalyzer(object):
                     skip_max = self.records[i]

         avg_of_records = sum_of_records / float(count)
-        avg_of_records_skipped = sum_of_records_skipped / float(count - skip_steps)
+        avg_of_records_skipped = sum_of_records_skipped / float(count -
+                                                                 skip_steps)

-        fps, fps_unit = self._get_fps(mode, batch_size, gpu_num, avg_of_records, run_mode, unit)
-        fps_skipped, _ = self._get_fps(mode, batch_size, gpu_num, avg_of_records_skipped, run_mode, unit)
+        fps, fps_unit = self._get_fps(mode, batch_size, gpu_num, avg_of_records,
+                                      run_mode, unit)
+        fps_skipped, _ = self._get_fps(mode, batch_size, gpu_num,
+                                       avg_of_records_skipped, run_mode, unit)
         if mode == -1:
             print("average ips of %d steps, skip 0 step:" % count)
             print("\tAvg: %.3f %s" % (avg_of_records, fps_unit))
             print("\tFPS: %.3f %s" % (fps, fps_unit))
             if skip_steps > 0:
-                print("average ips of %d steps, skip %d steps:" % (count, skip_steps))
+                print("average ips of %d steps, skip %d steps:" %
+                      (count, skip_steps))
                 print("\tAvg: %.3f %s" % (avg_of_records_skipped, fps_unit))
                 print("\tMin: %.3f %s" % (skip_min, fps_unit))
                 print("\tMax: %.3f %s" % (skip_max, fps_unit))
@@ -199,7 +248,8 @@ class TimeAnalyzer(object):
             print("\tAvg: %.3f steps/s" % avg_of_records)
             print("\tFPS: %.3f %s" % (fps, fps_unit))
             if skip_steps > 0:
-                print("average latency of %d steps, skip %d steps:" % (count, skip_steps))
+                print("average latency of %d steps, skip %d steps:" %
+                      (count, skip_steps))
                 print("\tAvg: %.3f steps/s" % avg_of_records_skipped)
                 print("\tMin: %.3f steps/s" % skip_min)
                 print("\tMax: %.3f steps/s" % skip_max)
@@ -209,7 +259,8 @@ class TimeAnalyzer(object):
             print("\tAvg: %.3f s/step" % avg_of_records)
             print("\tFPS: %.3f %s" % (fps, fps_unit))
             if skip_steps > 0:
-                print("average latency of %d steps, skip %d steps:" % (count, skip_steps))
+                print("average latency of %d steps, skip %d steps:" %
+                      (count, skip_steps))
                 print("\tAvg: %.3f s/step" % avg_of_records_skipped)
                 print("\tMin: %.3f s/step" % skip_min)
                 print("\tMax: %.3f s/step" % skip_max)
@@ -236,7 +287,8 @@ if __name__ == "__main__":
             if args.gpu_num == 1:
                 run_info["log_with_profiler"] = args.log_with_profiler
                 run_info["profiler_path"] = args.profiler_path
-            analyzer = TimeAnalyzer(args.filename, args.keyword, args.separator, args.position, args.range)
+            analyzer = TimeAnalyzer(args.filename, args.keyword, args.separator,
+                                    args.position, args.range)
             run_info["FINAL_RESULT"], run_info["UNIT"] = analyzer.analysis(
                 batch_size=args.base_batch_size,
                 gpu_num=args.gpu_num,
@@ -245,29 +297,50 @@ if __name__ == "__main__":
                 run_mode=args.run_mode,
                 unit=args.ips_unit)
             try:
-                if int(os.getenv('job_fail_flag')) == 1 or int(run_info["FINAL_RESULT"]) == 0:
+                if int(os.getenv('job_fail_flag')) == 1 or int(run_info[
+                        "FINAL_RESULT"]) == 0:
                     run_info["JOB_FAIL_FLAG"] = 1
             except:
                 pass
         elif args.index == 3:
             run_info["FINAL_RESULT"] = {}
-            records_fo_total = TimeAnalyzer(args.filename, 'Framework overhead', None, 3, '').records
-            records_fo_ratio = TimeAnalyzer(args.filename, 'Framework overhead', None, 5).records
-            records_ct_total = TimeAnalyzer(args.filename, 'Computation time', None, 3, '').records
-            records_gm_total = TimeAnalyzer(args.filename, 'GpuMemcpy Calls', None, 4, '').records
-            records_gm_ratio = TimeAnalyzer(args.filename, 'GpuMemcpy Calls', None, 6).records
-            records_gmas_total = TimeAnalyzer(args.filename, 'GpuMemcpyAsync Calls', None, 4, '').records
-            records_gms_total = TimeAnalyzer(args.filename, 'GpuMemcpySync Calls', None, 4, '').records
-            run_info["FINAL_RESULT"]["Framework_Total"] = records_fo_total[0] if records_fo_total else 0
-            run_info["FINAL_RESULT"]["Framework_Ratio"] = records_fo_ratio[0] if records_fo_ratio else 0
-            run_info["FINAL_RESULT"]["ComputationTime_Total"] = records_ct_total[0] if records_ct_total else 0
-            run_info["FINAL_RESULT"]["GpuMemcpy_Total"] = records_gm_total[0] if records_gm_total else 0
-            run_info["FINAL_RESULT"]["GpuMemcpy_Ratio"] = records_gm_ratio[0] if records_gm_ratio else 0
-            run_info["FINAL_RESULT"]["GpuMemcpyAsync_Total"] = records_gmas_total[0] if records_gmas_total else 0
-            run_info["FINAL_RESULT"]["GpuMemcpySync_Total"] = records_gms_total[0] if records_gms_total else 0
+            records_fo_total = TimeAnalyzer(args.filename, 'Framework overhead',
+                                            None, 3, '').records
+            records_fo_ratio = TimeAnalyzer(args.filename, 'Framework overhead',
+                                            None, 5).records
+            records_ct_total = TimeAnalyzer(args.filename, 'Computation time',
+                                            None, 3, '').records
+            records_gm_total = TimeAnalyzer(args.filename,
+                                            'GpuMemcpy Calls',
+                                            None, 4, '').records
+            records_gm_ratio = TimeAnalyzer(args.filename,
+                                            'GpuMemcpy Calls',
+                                            None, 6).records
+            records_gmas_total = TimeAnalyzer(args.filename,
+                                              'GpuMemcpyAsync Calls',
+                                              None, 4, '').records
+            records_gms_total = TimeAnalyzer(args.filename,
+                                             'GpuMemcpySync Calls',
+                                             None, 4, '').records
+            run_info["FINAL_RESULT"]["Framework_Total"] = records_fo_total[
+                0] if records_fo_total else 0
+            run_info["FINAL_RESULT"]["Framework_Ratio"] = records_fo_ratio[
+                0] if records_fo_ratio else 0
+            run_info["FINAL_RESULT"][
+                "ComputationTime_Total"] = records_ct_total[
+                    0] if records_ct_total else 0
+            run_info["FINAL_RESULT"]["GpuMemcpy_Total"] = records_gm_total[
+                0] if records_gm_total else 0
+            run_info["FINAL_RESULT"]["GpuMemcpy_Ratio"] = records_gm_ratio[
+                0] if records_gm_ratio else 0
+            run_info["FINAL_RESULT"][
+                "GpuMemcpyAsync_Total"] = records_gmas_total[
+                    0] if records_gmas_total else 0
+            run_info["FINAL_RESULT"]["GpuMemcpySync_Total"] = records_gms_total[
+                0] if records_gms_total else 0
         else:
             print("Not support!")
     except Exception:
-        traceback.print_exc()
-        print("{}".format(json.dumps(run_info))) # it's required, for the log file path insert to the database
-
+        traceback.print_exc()
+    print("{}".format(json.dumps(run_info))
+          )  # it's required, for the log file path insert to the database
diff --git a/benchmark/run_benchmark_det.sh b/benchmark/run_benchmark_det.sh
index 54263e953f3f758b318df147d34ee942a247ed18..818aa7e3e1fb342174a0cf5be4d45af0b0205a39 100644
--- a/benchmark/run_benchmark_det.sh
+++ b/benchmark/run_benchmark_det.sh
@@ -58,3 +58,4 @@ source ${BENCHMARK_ROOT}/scripts/run_model.sh # 在该脚本中会对符合
 _set_params $@
 #_train # 如果只想产出训练log,不解析,可取消注释
 _run # 该函数在run_model.sh中,执行时会调用_train; 如果不联调只想要产出训练log可以注掉本行,提交时需打开
+
diff --git a/benchmark/run_det.sh b/benchmark/run_det.sh
index be0c141f7ee168d10eebb6efb57158d18ed02f72..981510c9ae80698dae7f4c8b342dc50442aa7913 100644
--- a/benchmark/run_det.sh
+++ b/benchmark/run_det.sh
@@ -36,3 +36,4 @@ for model_mode in ${model_mode_list[@]}; do
 done


+
diff --git a/test_tipc/benchmark_train.sh b/test_tipc/benchmark_train.sh
index fc49cbb3e69771efb49b39c45cd627a314205360..d5b4e2f11a555e4e11aafcc728cdc96ceb5f7fd4 100644
--- a/test_tipc/benchmark_train.sh
+++ b/test_tipc/benchmark_train.sh
@@ -3,8 +3,6 @@ source test_tipc/common_func.sh

 # set env
 python=python
-export model_branch=`git symbolic-ref HEAD 2>/dev/null | cut -d"/" -f 3`
-export model_commit=$(git log|head -n1|awk '{print $2}')
 export str_tmp=$(echo `pip list|grep paddlepaddle-gpu|awk -F ' ' '{print $2}'`)
 export frame_version=${str_tmp%%.post*}
 export frame_commit=$(echo `${python} -c "import paddle;print(paddle.version.commit)"`)
diff --git a/test_tipc/prepare.sh b/test_tipc/prepare.sh
index 158b8cb8bc25dd1e33e8c7b8d3a8bb76f9ad7624..62451417287228868c33f778f3aae796b53dabcf 100644
--- a/test_tipc/prepare.sh
+++ b/test_tipc/prepare.sh
@@ -24,7 +24,17 @@ if [ ${MODE} = "benchmark_train" ];then
     pip install -r requirements.txt
     if [[ ${model_name} =~ "det_mv3_db_v2_0" || ${model_name} =~ "det_r50_vd_east_v2_0" || ${model_name} =~ "det_r50_vd_pse_v2_0" || ${model_name} =~ "det_r18_db_v2_0" ]];then
         rm -rf ./train_data/icdar2015
-        wget -nc -P ./pretrain_models/ https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/MobileNetV3_large_x0_5_pretrained.pdparams --no-check-certificate
+        wget -nc -P ./pretrain_models/ https://paddleocr.bj.bcebos.com/pretrained/MobileNetV3_large_x0_5_pretrained.pdparams --no-check-certificate
+        wget -nc -P ./train_data/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/icdar2015.tar --no-check-certificate
+        cd ./train_data/ && tar xf icdar2015.tar && cd ../
+    fi
+    if [[ ${model_name} =~ "det_r50_vd_east_v2_0" || ${model_name} =~ "det_r50_vd_pse_v2_0" ]];then
+        wget -nc -P ./pretrain_models/ https://paddleocr.bj.bcebos.com/pretrained/ResNet50_vd_ssld_pretrained.pdparams --no-check-certificate
+        wget -nc -P ./train_data/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/icdar2015.tar --no-check-certificate
+        cd ./train_data/ && tar xf icdar2015.tar && cd ../
+    fi
+    if [[ ${model_name} =~ "det_r18_db_v2_0" ]];then
+        wget -nc -P ./pretrain_models/ https://paddleocr.bj.bcebos.com/pretrained/ResNet18_vd_pretrained.pdparams --no-check-certificate
         wget -nc -P ./train_data/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/icdar2015.tar --no-check-certificate
         cd ./train_data/ && tar xf icdar2015.tar && cd ../
     fi