diff --git a/deploy/slim/prune/sensitivity_anal.py b/deploy/slim/prune/sensitivity_anal.py
index bd2b96497221fd886c83b9401cc8ed2a1a201a50..f80ddd9fb424a4b0c8f30304530d7d20f982598a 100644
--- a/deploy/slim/prune/sensitivity_anal.py
+++ b/deploy/slim/prune/sensitivity_anal.py
@@ -75,7 +75,7 @@ def main(config, device, logger, vdl_writer):
     model = build_model(config['Architecture'])

     flops = paddle.flops(model, [1, 3, 640, 640])
-    logger.info(f"FLOPs before pruning: {flops}")
+    logger.info("FLOPs before pruning: {}".format(flops))

     from paddleslim.dygraph import FPGMFilterPruner
     model.train()
@@ -106,8 +106,8 @@ def main(config, device, logger, vdl_writer):

     def eval_fn():
         metric = program.eval(model, valid_dataloader, post_process_class,
-                              eval_class)
-        logger.info(f"metric['hmean']: {metric['hmean']}")
+                              eval_class, False)
+        logger.info("metric['hmean']: {}".format(metric['hmean']))
         return metric['hmean']

     params_sensitive = pruner.sensitive(
@@ -123,16 +123,17 @@ def main(config, device, logger, vdl_writer):
     # calculate pruned params's ratio
     params_sensitive = pruner._get_ratios_by_loss(params_sensitive, loss=0.02)
     for key in params_sensitive.keys():
-        logger.info(f"{key}, {params_sensitive[key]}")
+        logger.info("{}, {}".format(key, params_sensitive[key]))
+
+    #params_sensitive = {}
+    #for param in model.parameters():
+    #    if 'transpose' not in param.name and 'linear' not in param.name:
+    #        params_sensitive[param.name] = 0.1

     plan = pruner.prune_vars(params_sensitive, [0])
-    for param in model.parameters():
-        if ("weights" in param.name and "conv" in param.name) or (
-                "w_0" in param.name and "conv2d" in param.name):
-            logger.info(f"{param.name}: {param.shape}")

     flops = paddle.flops(model, [1, 3, 640, 640])
-    logger.info(f"FLOPs after pruning: {flops}")
+    logger.info("FLOPs after pruning: {}".format(flops))

     # start train
diff --git a/tests/ocr_det_params.txt b/tests/ocr_det_params.txt
index 805b6fe2fb6b4424f40e2f2af57e5058468c1ad4..73b12cec9c4f8a8745f34b00222f89ba68ff9d5f 100644
--- a/tests/ocr_det_params.txt
+++ b/tests/ocr_det_params.txt
@@ -4,7 +4,7 @@ python:python3.7
 gpu_list:0|0,1
 Global.use_gpu:True|True
 Global.auto_cast:null
-Global.epoch_num:lite_train_infer=2|whole_train_infer=300
+Global.epoch_num:lite_train_infer=1|whole_train_infer=300
 Global.save_model_dir:./output/
 Train.loader.batch_size_per_card:lite_train_infer=2|whole_train_infer=4
 Global.pretrained_model:null
@@ -15,7 +15,7 @@ null:null
 trainer:norm_train|pact_train
 norm_train:tools/train.py -c configs/det/det_mv3_db.yml -o Global.pretrained_model=./pretrain_models/MobileNetV3_large_x0_5_pretrained
 pact_train:deploy/slim/quantization/quant.py -c configs/det/det_mv3_db.yml -o
-fpgm_train:null
+fpgm_train:deploy/slim/prune/sensitivity_anal.py -c configs/det/det_mv3_db.yml -o Global.pretrained_model=./pretrain_models/det_mv3_db_v2.0_train/best_accuracy
 distill_train:null
 null:null
 null:null
@@ -29,7 +29,7 @@ Global.save_inference_dir:./output/
 Global.pretrained_model:
 norm_export:tools/export_model.py -c configs/det/det_mv3_db.yml -o
 quant_export:deploy/slim/quantization/export_model.py -c configs/det/det_mv3_db.yml -o
-fpgm_export:deploy/slim/prune/export_prune_model.py
+fpgm_export:deploy/slim/prune/export_prune_model.py -c configs/det/det_mv3_db.yml -o
 distill_export:null
 export1:null
 export2:null
diff --git a/tests/ocr_det_server_params.txt b/tests/ocr_det_server_params.txt
new file mode 100644
index 0000000000000000000000000000000000000000..0835cfffa62dd879d092bf829d782e767b9680ad
--- /dev/null
+++ b/tests/ocr_det_server_params.txt
@@ -0,0 +1,52 @@
+===========================train_params===========================
+model_name:ocr_server_det
+python:python3.7
+gpu_list:0|0,1
+Global.use_gpu:True|True
+Global.auto_cast:null
+Global.epoch_num:lite_train_infer=2|whole_train_infer=300
+Global.save_model_dir:./output/
+Train.loader.batch_size_per_card:lite_train_infer=2|whole_train_infer=4
+Global.pretrained_model:null
+train_model_name:latest
+train_infer_img_dir:./train_data/icdar2015/text_localization/ch4_test_images/
+null:null
+##
+trainer:norm_train|pact_train
+norm_train:tools/train.py -c configs/det/det_r50_vd_db.yml -o Global.pretrained_model=""
+pact_train:null
+fpgm_train:null
+distill_train:null
+null:null
+null:null
+##
+===========================eval_params===========================
+eval:tools/eval.py -c configs/det/det_mv3_db.yml -o
+null:null
+##
+===========================infer_params===========================
+Global.save_inference_dir:./output/
+Global.pretrained_model:
+norm_export:tools/export_model.py -c configs/det/det_r50_vd_db.yml -o
+quant_export:null
+fpgm_export:null
+distill_export:null
+export1:null
+export2:null
+##
+infer_model:./inference/ch_ppocr_server_v2.0_det_infer/
+infer_export:null
+infer_quant:False
+inference:tools/infer/predict_det.py
+--use_gpu:True|False
+--enable_mkldnn:True|False
+--cpu_threads:1|6
+--rec_batch_num:1
+--use_tensorrt:False|True
+--precision:fp32|fp16|int8
+--det_model_dir:
+--image_dir:./inference/ch_det_data_50/all-sum-510/
+--save_log_path:null
+--benchmark:True
+null:null
+
diff --git a/tests/prepare.sh b/tests/prepare.sh
index c3b60299339ca15840a63d589b05d56e08cb3b63..5da74d949f5543f50a4dc61b3093aa11c0caabf6 100644
--- a/tests/prepare.sh
+++ b/tests/prepare.sh
@@ -34,11 +34,14 @@ MODE=$2
 if [ ${MODE} = "lite_train_infer" ];then
     # pretrain lite train data
     wget -nc -P ./pretrain_models/ https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/MobileNetV3_large_x0_5_pretrained.pdparams
+    wget -nc -P ./pretrain_models/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/det_mv3_db_v2.0_train.tar
+    cd ./pretrain_models/ && tar xf det_mv3_db_v2.0_train.tar && cd ../
     rm -rf ./train_data/icdar2015
     rm -rf ./train_data/ic15_data
     wget -nc -P ./train_data/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/icdar2015_lite.tar
     wget -nc -P ./train_data/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/ic15_data.tar # todo change to bcebos
-
+    wget -nc -P ./deploy/slim/prune https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/sen.pickle
+
     cd ./train_data/ && tar xf icdar2015_lite.tar && tar xf ic15_data.tar
     ln -s ./icdar2015_lite ./icdar2015
     cd ../
@@ -65,6 +68,10 @@ elif [ ${MODE} = "infer" ] || [ ${MODE} = "cpp_infer" ];then
         wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/ch_det_data_50.tar
         wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_det_infer.tar
         cd ./inference && tar xf ${eval_model_name}.tar && tar xf ch_det_data_50.tar && cd ../
+    elif [ ${model_name} = "ocr_server_det" ]; then
+        wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_server_v2.0_det_infer.tar
+        wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/ch_det_data_50.tar
+        cd ./inference && tar xf ch_ppocr_server_v2.0_det_infer.tar && tar xf ch_det_data_50.tar && cd ../
     else
         rm -rf ./train_data/ic15_data
         eval_model_name="ch_ppocr_mobile_v2.0_rec_infer"
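
For reference, a rough sketch of what the new fpgm_train / fpgm_export entries in tests/ocr_det_params.txt amount to when run by hand. The interpreter, scripts, config file and Global.pretrained_model override are taken verbatim from the params file; the extra Global.epoch_num, Global.save_model_dir and export-path overrides that the test harness would append are illustrative assumptions only.

    # FPGM sensitivity analysis + pruning of the MobileNetV3 DB detector
    # (the pretrained weights and sen.pickle are fetched by tests/prepare.sh in lite_train_infer mode)
    python3.7 deploy/slim/prune/sensitivity_anal.py -c configs/det/det_mv3_db.yml \
        -o Global.pretrained_model=./pretrain_models/det_mv3_db_v2.0_train/best_accuracy \
           Global.epoch_num=1 Global.save_model_dir=./output/

    # export the pruned model for inference (the trained-weights path ./output/latest is hypothetical,
    # based on train_model_name:latest and Global.save_model_dir:./output/ in the params file)
    python3.7 deploy/slim/prune/export_prune_model.py -c configs/det/det_mv3_db.yml \
        -o Global.pretrained_model=./output/latest Global.save_inference_dir=./output/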