diff --git a/deploy/slim/quantization/export_model.py b/deploy/slim/quantization/export_model.py
index fe843d413341aac6a72f9b4c8eb8c481b8076d85..e9c1a8d31110ef20dd66be28d78b1e866fcd85ae 100755
--- a/deploy/slim/quantization/export_model.py
+++ b/deploy/slim/quantization/export_model.py
@@ -110,10 +110,12 @@ def main():
 
     # build dataloader
     valid_dataloader = build_dataloader(config, 'Eval', device, logger)
-    # start eval
+    use_srn = config['Architecture']['algorithm'] == "SRN"
     model_type = config['Architecture']['model_type']
-    metric = program.eval(model, valid_dataloader, post_process_class,
-                          eval_class, model_type)
+    # start eval
+    metric = program.eval(model, valid_dataloader, post_process_class,
+                          eval_class, model_type, use_srn)
+
     logger.info('metric eval ***************')
     for k, v in metric.items():
         logger.info('{}:{}'.format(k, v))
diff --git a/test/ocr_det_params.txt b/test/ocr_det_params.txt
index 9752ba435992b817e0349a671004e226a17ad026..01ac82d3d7d459ca324ec61cfcaac2386660a211 100644
--- a/test/ocr_det_params.txt
+++ b/test/ocr_det_params.txt
@@ -1,7 +1,7 @@
 model_name:ocr_det
 python:python3.7
-gpu_list:-1|0|0,1
-Global.auto_cast:False|True
+gpu_list:0|0,1
+Global.auto_cast:False
 Global.epoch_num:10
 Global.save_model_dir:./output/
 Global.save_inference_dir:./output/
@@ -9,7 +9,7 @@
 Train.loader.batch_size_per_card:
 Global.use_gpu
 Global.pretrained_model
-trainer:norm|pact|fpgm
+trainer:norm|pact
 norm_train:tools/train.py -c configs/det/det_mv3_db.yml -o Global.pretrained_model=./pretrain_models/MobileNetV3_large_x0_5_pretrained
 quant_train:deploy/slim/quantization/quant.py -c configs/det/det_mv3_db.yml -o Global.pretrained_model=./pretrain_models/det_mv3_db_v2.0_train/best_accuracy
 fpgm_train:null
diff --git a/test/prepare.sh b/test/prepare.sh
index 42f12b57257626153d3635f6cb3dce70f2355cef..150682469641a784f641313d361bb921d6d9dfb8 100644
--- a/test/prepare.sh
+++ b/test/prepare.sh
@@ -94,7 +94,7 @@ for train_model in ${train_model_list[*]}; do
     # eval
     for slim_trainer in ${trainer_list[*]}; do
        if [ ${slim_trainer} = "norm" ]; then
-            if [ ${model_name} = "det" ]; then
+            if [ ${model_name} = "ocr_det" ]; then
                eval_model_name="ch_ppocr_mobile_v2.0_det_train"
                wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_det_train.tar
                cd ./inference && tar xf ${eval_model_name}.tar && cd ../
@@ -104,7 +104,7 @@ for train_model in ${train_model_list[*]}; do
                cd ./inference && tar xf ${eval_model_name}.tar && cd ../
            fi
        elif [ ${slim_trainer} = "pact" ]; then
-            if [ ${model_name} = "det" ]; then
+            if [ ${model_name} = "ocr_det" ]; then
                eval_model_name="ch_ppocr_mobile_v2.0_det_quant_train"
                wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/slim/ch_ppocr_mobile_v2.0_det_quant_train.tar
                cd ./inference && tar xf ${eval_model_name}.tar && cd ../
@@ -114,7 +114,7 @@ for train_model in ${train_model_list[*]}; do
                cd ./inference && tar xf ${eval_model_name}.tar && cd ../
            fi
        elif [ ${slim_trainer} = "distill" ]; then
-            if [ ${model_name} = "det" ]; then
+            if [ ${model_name} = "ocr_det" ]; then
                eval_model_name="ch_ppocr_mobile_v2.0_det_distill_train"
                wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/slim/ch_ppocr_mobile_v2.0_det_distill_train.tar
                cd ./inference && tar xf ${eval_model_name}.tar && cd ../
@@ -124,7 +124,7 @@ for train_model in ${train_model_list[*]}; do
                cd ./inference && tar xf ${eval_model_name}.tar && cd ../
            fi
        elif [ ${slim_trainer} = "fpgm" ]; then
-            if [ ${model_name} = "det" ]; then
+            if [ ${model_name} = "ocr_det" ]; then
"ocr_det" ]; then eval_model_name="ch_ppocr_mobile_v2.0_det_prune_train" wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/slim/ch_ppocr_mobile_v2.0_det_prune_train.tar cd ./inference && tar xf ${eval_model_name}.tar && cd ../ diff --git a/test/test.sh b/test/test.sh index b95b8ead2b4c0fe5fde32aef5289db037a67d06a..2a27563ffaa2b1b96b58cbff89546acf7a286210 100644 --- a/test/test.sh +++ b/test/test.sh @@ -110,7 +110,7 @@ function func_inference(){ for threads in ${cpu_threads_list[*]}; do for batch_size in ${batch_size_list[*]}; do _save_log_path="${_log_path}/infer_cpu_usemkldnn_${use_mkldnn}_threads_${threads}_batchsize_${batch_size}" - command="${_python} ${_script} ${use_gpu_key}=${use_gpu} ${use_mkldnn_key}=${use_mkldnn} ${cpu_threads_key}=${threads} ${model_dir_key}=${_model_dir} ${batch_size_key}=${batch_size} ${image_dir_key}=${_img_dir} ${save_log_key}=${_save_log_path}" + command="${_python} ${_script} ${use_gpu_key}=${use_gpu} ${use_mkldnn_key}=${use_mkldnn} ${cpu_threads_key}=${threads} ${model_dir_key}=${_model_dir} ${batch_size_key}=${batch_size} ${image_dir_key}=${_img_dir} ${save_log_key}=${_save_log_path} --benchmark=True" eval $command status_check $? "${command}" "${status_log}" done @@ -124,7 +124,7 @@ function func_inference(){ fi for batch_size in ${batch_size_list[*]}; do _save_log_path="${_log_path}/infer_gpu_usetrt_${use_trt}_precision_${precision}_batchsize_${batch_size}" - command="${_python} ${_script} ${use_gpu_key}=${use_gpu} ${use_trt_key}=${use_trt} ${precision_key}=${precision} ${model_dir_key}=${_model_dir} ${batch_size_key}=${batch_size} ${image_dir_key}=${_img_dir} ${save_log_key}=${_save_log_path}" + command="${_python} ${_script} ${use_gpu_key}=${use_gpu} ${use_trt_key}=${use_trt} ${precision_key}=${precision} ${model_dir_key}=${_model_dir} ${batch_size_key}=${batch_size} ${image_dir_key}=${_img_dir} ${save_log_key}=${_save_log_path} --benchmark=True" eval $command status_check $? 
"${command}" "${status_log}" done @@ -138,9 +138,9 @@ if [ ${MODE} != "infer" ]; then IFS="|" for gpu in ${gpu_list[*]}; do - use_gpu=True + train_use_gpu=True if [ ${gpu} = "-1" ];then - use_gpu=False + train_use_gpu=False env="" elif [ ${#gpu} -le 1 ];then env="export CUDA_VISIBLE_DEVICES=${gpu}" @@ -181,7 +181,7 @@ for gpu in ${gpu_list[*]}; do save_log="${LOG_PATH}/${trainer}_gpus_${gpu}_autocast_${autocast}" if [ ${#gpu} -le 2 ];then # epoch_num #TODO - cmd="${python} ${run_train} ${train_use_gpu_key}=${use_gpu} ${autocast_key}=${autocast} ${epoch_key}=${epoch_num} ${save_model_key}=${save_log} " + cmd="${python} ${run_train} ${train_use_gpu_key}=${train_use_gpu} ${autocast_key}=${autocast} ${epoch_key}=${epoch_num} ${save_model_key}=${save_log} " elif [ ${#gpu} -le 15 ];then cmd="${python} -m paddle.distributed.launch --gpus=${gpu} ${run_train} ${autocast_key}=${autocast} ${epoch_key}=${epoch_num} ${save_model_key}=${save_log}" else diff --git a/tools/infer/predict_det.py b/tools/infer/predict_det.py index 4253964ec782bce0a292f4fb6e1927d7e50962c2..bbf3659cbc6c34e550ba08440312a09da6362df0 100755 --- a/tools/infer/predict_det.py +++ b/tools/infer/predict_det.py @@ -106,7 +106,7 @@ class TextDetector(object): model_precision=args.precision, batch_size=1, data_shape="dynamic", - save_path="./output/auto_log.lpg", + save_path=args.save_log_path, inference_config=self.config, pids=pid, process_name=None, @@ -174,7 +174,7 @@ class TextDetector(object): data = {'image': img} st = time.time() - + if args.benchmark: self.autolog.times.start() @@ -212,7 +212,7 @@ class TextDetector(object): else: raise NotImplementedError - self.predictor.try_shrink_memory() + #self.predictor.try_shrink_memory() post_result = self.postprocess_op(preds, shape_list) dt_boxes = post_result[0]['points'] if self.det_algorithm == "SAST" and self.det_sast_polygon: @@ -262,7 +262,6 @@ if __name__ == "__main__": "det_res_{}".format(img_name_pure)) cv2.imwrite(img_path, src_im) logger.info("The visualized image saved in {}".format(img_path)) - + if args.benchmark: text_detector.autolog.report() - diff --git a/tools/infer/utility.py b/tools/infer/utility.py index 021494ceea428709f4155e0d7c1142ca5a31858c..cf14e4abd71f1ac6e2ceec11163e635daef11f4d 100755 --- a/tools/infer/utility.py +++ b/tools/infer/utility.py @@ -176,6 +176,7 @@ def create_predictor(args, mode, logger): "conv2d_59.tmp_0": [1, 96, 20, 20], "nearest_interp_v2_1.tmp_0": [1, 96, 10, 10], "nearest_interp_v2_2.tmp_0": [1, 96, 20, 20], + "conv2d_124.tmp_0": [1, 96, 20, 20], "nearest_interp_v2_3.tmp_0": [1, 24, 20, 20], "nearest_interp_v2_4.tmp_0": [1, 24, 20, 20], "nearest_interp_v2_5.tmp_0": [1, 24, 20, 20], @@ -188,6 +189,7 @@ def create_predictor(args, mode, logger): "conv2d_91.tmp_0": [1, 96, 200, 200], "conv2d_59.tmp_0": [1, 96, 400, 400], "nearest_interp_v2_1.tmp_0": [1, 96, 200, 200], + "conv2d_124.tmp_0": [1, 256, 400, 400], "nearest_interp_v2_2.tmp_0": [1, 96, 400, 400], "nearest_interp_v2_3.tmp_0": [1, 24, 400, 400], "nearest_interp_v2_4.tmp_0": [1, 24, 400, 400], @@ -202,6 +204,7 @@ def create_predictor(args, mode, logger): "conv2d_59.tmp_0": [1, 96, 160, 160], "nearest_interp_v2_1.tmp_0": [1, 96, 80, 80], "nearest_interp_v2_2.tmp_0": [1, 96, 160, 160], + "conv2d_124.tmp_0": [1, 256, 160, 160], "nearest_interp_v2_3.tmp_0": [1, 24, 160, 160], "nearest_interp_v2_4.tmp_0": [1, 24, 160, 160], "nearest_interp_v2_5.tmp_0": [1, 24, 160, 160], @@ -237,7 +240,7 @@ def create_predictor(args, mode, logger): # enable memory optim config.enable_memory_optim() - 
-        config.disable_glog_info()
+        #config.disable_glog_info()
        config.delete_pass("conv_transpose_eltwiseadd_bn_fuse_pass")
 
        if mode == 'table':
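
Usage note: after this change the detection harness exercises only the norm and pact trainers, GPU-only. A minimal sketch of driving it with the updated params file, assuming the scripts take the params file plus a mode argument ("infer" is one mode referenced in test.sh; the exact calling convention is not shown in this patch):

    # hypothetical invocation; adjust the mode to your scenario
    bash test/prepare.sh ./test/ocr_det_params.txt 'infer'
    bash test/test.sh ./test/ocr_det_params.txt 'infer'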
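
The --benchmark=True flag appended in func_inference can also be passed to the detector directly; a sketch, assuming an exported detection model unpacked under ./inference/ (the model directory and image paths here are illustrative):

    # --benchmark and --save_log_path are wired up in this patch;
    # the model/image paths below are placeholders
    python3.7 tools/infer/predict_det.py \
        --det_model_dir=./inference/ch_ppocr_mobile_v2.0_det_infer/ \
        --image_dir=./doc/imgs/ \
        --benchmark=True \
        --save_log_path=./output/

With --benchmark=True, TextDetector records timings through autolog and text_detector.autolog.report() prints the summary at exit; the autolog save_path now follows --save_log_path instead of the hard-coded ./output/auto_log.lpg.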