diff --git a/test_tipc/config/GeneralRecognition/GeneralRecognition_PPLCNet_x2_5_train_ptq_infer_python.txt b/test_tipc/config/GeneralRecognition/GeneralRecognition_PPLCNet_x2_5_train_ptq_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..d5863d8c06a2647b2c52bee77fac02368023f52c --- /dev/null +++ b/test_tipc/config/GeneralRecognition/GeneralRecognition_PPLCNet_x2_5_train_ptq_infer_python.txt @@ -0,0 +1,54 @@ +===========================train_params=========================== +model_name:GeneralRecognition_PPLCNet_x2_5 +python:python3.7 +gpu_list:0 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=100 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:pact_train +norm_train:null +pact_train:tools/train.py -c ppcls/configs/GeneralRecognition/GeneralRecognition_PPLCNet_x2_5.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/GeneralRecognition/GeneralRecognition_PPLCNet_x2_5.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:null +quant_export:tools/export_model.py -c ppcls/configs/GeneralRecognition/GeneralRecognition_PPLCNet_x2_5.yaml +fpgm_export:null +distill_export:null +kl_quant:deploy/slim/quant_post_static.py -c ppcls/configs/GeneralRecognition/GeneralRecognition_PPLCNet_x2_5.yaml -o Global.save_inference_dir=./general_PPLCNet_x2_5_lite_v1.0_infer +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/rec/models/inference/general_PPLCNet_x2_5_lite_v1.0_infer.tar +infer_model:./general_PPLCNet_x2_5_lite_v1.0_infer/ +infer_export:True +infer_quant:False +inference:python/predict_rec.py -c configs/inference_rec.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.rec_inference_model_dir:../inference +-o Global.infer_imgs:../dataset/Aliproduct/demo_test/ +-o Global.save_log_path:null +-o Global.benchmark:False +null:null +null:null +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/config/MobileNetV3/MobileNetV3_large_x1_0_train_ptq_infer_python.txt b/test_tipc/config/MobileNetV3/MobileNetV3_large_x1_0_train_ptq_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..53628ef5f0bdf54af9feafe58abbe8563330324b --- /dev/null +++ b/test_tipc/config/MobileNetV3/MobileNetV3_large_x1_0_train_ptq_infer_python.txt @@ -0,0 +1,60 @@ +===========================train_params=========================== +model_name:MobileNetV3_large_x1_0 +python:python3.7 +gpu_list:0 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest
+train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:norm_train +norm_train:tools/train.py -c ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_large_x1_0.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False +pact_train:tools/train.py -c ppcls/configs/slim/MobileNetV3_large_x1_0_quantization.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False +fpgm_train:tools/train.py -c ppcls/configs/slim/MobileNetV3_large_x1_0_prune.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False +distill_train:null +to_static_train:-o Global.to_static=True +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_large_x1_0.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_large_x1_0.yaml +quant_export:tools/export_model.py -c ppcls/configs/slim/MobileNetV3_large_x1_0_quantization.yaml +fpgm_export:tools/export_model.py -c ppcls/configs/slim/MobileNetV3_large_x1_0_prune.yaml +distill_export:null +kl_quant:deploy/slim/quant_post_static.py -c ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_large_x1_0.yaml -o Global.save_inference_dir=./MobileNetV3_large_x1_0_infer +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/MobileNetV3_large_x1_0_infer.tar +infer_model:./MobileNetV3_large_x1_0_infer/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../deploy/images/ImageNet/ILSVRC2012_val_00000010.jpeg +-o Global.save_log_path:null +-o Global.benchmark:False +null:null +null:null +===========================train_benchmark_params========================== +batch_size:256|640 +fp_items:fp32 +epoch:1 +--profiler_options:batch_range=[10,20];state=GPU;tracer_option=Default;profile_path=model.profile +flags:FLAGS_eager_delete_tensor_gb=0.0;FLAGS_fraction_of_gpu_memory_to_use=0.98;FLAGS_conv_workspace_size_limit=4096 +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/config/PPHGNet/PPHGNet_small_train_ptq_infer_python.txt b/test_tipc/config/PPHGNet/PPHGNet_small_train_ptq_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..ba76846bbb3439c4ce879e32d2fef59d571d99da --- /dev/null +++ b/test_tipc/config/PPHGNet/PPHGNet_small_train_ptq_infer_python.txt @@ -0,0 +1,53 @@ +===========================train_params=========================== +model_name:PPHGNet_small +python:python3.7 +gpu_list:0 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest
+train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:norm_train +norm_train:tools/train.py -c ppcls/configs/ImageNet/PPHGNet/PPHGNet_small.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/PPHGNet/PPHGNet_small.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/PPHGNet/PPHGNet_small.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:deploy/slim/quant_post_static.py -c ppcls/configs/ImageNet/PPHGNet/PPHGNet_small.yaml -o Global.save_inference_dir=./PPHGNet_small_infer +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPHGNet_small_infer.tar +infer_model:./PPHGNet_small_infer/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml -o PreProcess.transform_ops.0.ResizeImage.resize_short=236 +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../deploy/images/ImageNet/ILSVRC2012_val_00000010.jpeg +-o Global.save_log_path:null +-o Global.benchmark:False +null:null +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,224,224]}] diff --git a/test_tipc/config/PPLCNet/PPLCNet_x1_0_train_ptq_infer_python.txt b/test_tipc/config/PPLCNet/PPLCNet_x1_0_train_ptq_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..f38e567fea950a4366255848f9f90cc58c6bd687 --- /dev/null +++ b/test_tipc/config/PPLCNet/PPLCNet_x1_0_train_ptq_infer_python.txt @@ -0,0 +1,53 @@ +===========================train_params=========================== +model_name:PPLCNet_x1_0 +python:python3.7 +gpu_list:0 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:norm_train +norm_train:tools/train.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x1_0.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x1_0.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x1_0.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:deploy/slim/quant_post_static.py -c ppcls/configs/ImageNet/PPLCNet/PPLCNet_x1_0.yaml -o Global.save_inference_dir=./PPLCNet_x1_0_infer +export2:null
+pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPLCNet_x1_0_infer.tar +infer_model:./PPLCNet_x1_0_infer/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../deploy/images/ImageNet/ILSVRC2012_val_00000010.jpeg +-o Global.save_log_path:null +-o Global.benchmark:False +null:null +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/config/PPLCNetV2/PPLCNetV2_base_train_ptq_infer_python.txt b/test_tipc/config/PPLCNetV2/PPLCNetV2_base_train_ptq_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..d77d1ccc1f15e98c45070e868c3bc546fd18521e --- /dev/null +++ b/test_tipc/config/PPLCNetV2/PPLCNetV2_base_train_ptq_infer_python.txt @@ -0,0 +1,53 @@ +===========================train_params=========================== +model_name:PPLCNetV2_base +python:python3.7 +gpu_list:0 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.first_bs:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:norm_train +norm_train:tools/train.py -c ppcls/configs/ImageNet/PPLCNetV2/PPLCNetV2_base.yaml -o Global.seed=1234 -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/PPLCNetV2/PPLCNetV2_base.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/PPLCNetV2/PPLCNetV2_base.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:deploy/slim/quant_post_static.py -c ppcls/configs/ImageNet/PPLCNetV2/PPLCNetV2_base.yaml -o Global.save_inference_dir=./PPLCNetV2_base_infer +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPLCNetV2_base_infer.tar +infer_model:./PPLCNetV2_base_infer/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../deploy/images/ImageNet/ILSVRC2012_val_00000010.jpeg +-o Global.save_log_path:null +-o Global.benchmark:False +null:null +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,224,224]}] diff --git a/test_tipc/config/ResNet/ResNet50_vd_train_ptq_infer_python.txt b/test_tipc/config/ResNet/ResNet50_vd_train_ptq_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..6398beceee2a69f9be84eb7999c801a4fac5201a --- /dev/null +++ b/test_tipc/config/ResNet/ResNet50_vd_train_ptq_infer_python.txt @@ -0,0 +1,60 @@
+===========================train_params=========================== +model_name:ResNet50_vd +python:python3.7 +gpu_list:0 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=200 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:norm_train +norm_train:tools/train.py -c ppcls/configs/ImageNet/ResNet/ResNet50_vd.yaml -o Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False +pact_train:null +fpgm_train:null +distill_train:null +to_static_train:-o Global.to_static=True +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/ResNet/ResNet50_vd.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ResNet/ResNet50_vd.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:deploy/slim/quant_post_static.py -c ppcls/configs/ImageNet/ResNet/ResNet50_vd.yaml -o Global.save_inference_dir=./ResNet50_vd_infer +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/ResNet50_vd_infer.tar +infer_model:./ResNet50_vd_infer/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../deploy/images/ImageNet/ILSVRC2012_val_00000010.jpeg +-o Global.save_log_path:null +-o Global.benchmark:False +null:null +null:null +===========================train_benchmark_params========================== +batch_size:128 +fp_items:fp32 +epoch:1 +--profiler_options:batch_range=[10,20];state=GPU;tracer_option=Default;profile_path=model.profile +flags:FLAGS_eager_delete_tensor_gb=0.0;FLAGS_fraction_of_gpu_memory_to_use=0.98;FLAGS_conv_workspace_size_limit=4096 +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/config/SwinTransformer/SwinTransformer_tiny_patch4_window7_224_train_ptq_infer_python.txt b/test_tipc/config/SwinTransformer/SwinTransformer_tiny_patch4_window7_224_train_ptq_infer_python.txt new file mode 100644 index 0000000000000000000000000000000000000000..c87459d2e4ddac829d92931776cee8c8731f834c --- /dev/null +++ b/test_tipc/config/SwinTransformer/SwinTransformer_tiny_patch4_window7_224_train_ptq_infer_python.txt @@ -0,0 +1,60 @@ +===========================train_params=========================== +model_name:SwinTransformer_tiny_patch4_window7_224 +python:python3.7 +gpu_list:0 +-o Global.device:gpu +-o Global.auto_cast:null +-o Global.epochs:lite_train_lite_infer=2|whole_train_whole_infer=120 +-o Global.output_dir:./output/ +-o DataLoader.Train.sampler.batch_size:8 +-o Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./dataset/ILSVRC2012/val +null:null +## +trainer:norm_train +norm_train:tools/train.py -c ppcls/configs/ImageNet/SwinTransformer/SwinTransformer_tiny_patch4_window7_224.yaml -o
Global.seed=1234 -o DataLoader.Train.sampler.shuffle=False -o DataLoader.Train.loader.num_workers=0 -o DataLoader.Train.loader.use_shared_memory=False +pact_train:null +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c ppcls/configs/ImageNet/SwinTransformer/SwinTransformer_tiny_patch4_window7_224.yaml +null:null +## +===========================infer_params========================== +-o Global.save_inference_dir:./inference +-o Global.pretrained_model: +norm_export:tools/export_model.py -c ppcls/configs/ImageNet/SwinTransformer/SwinTransformer_tiny_patch4_window7_224.yaml +quant_export:null +fpgm_export:null +distill_export:null +kl_quant:deploy/slim/quant_post_static.py -c ppcls/configs/ImageNet/SwinTransformer/SwinTransformer_tiny_patch4_window7_224.yaml -o Global.save_inference_dir=./SwinTransformer_tiny_patch4_window7_224_infer +export2:null +pretrained_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/SwinTransformer_tiny_patch4_window7_224_infer.tar +infer_model:./SwinTransformer_tiny_patch4_window7_224_infer/ +infer_export:True +infer_quant:False +inference:python/predict_cls.py -c configs/inference_cls.yaml +-o Global.use_gpu:True|False +-o Global.enable_mkldnn:False +-o Global.cpu_num_threads:1 +-o Global.batch_size:1 +-o Global.use_tensorrt:False +-o Global.use_fp16:False +-o Global.inference_model_dir:../inference +-o Global.infer_imgs:../deploy/images/ImageNet/ILSVRC2012_val_00000010.jpeg +-o Global.save_log_path:null +-o Global.benchmark:False +null:null +null:null +===========================train_benchmark_params========================== +batch_size:64|104 +fp_items:fp32 +epoch:1 +--profiler_options:batch_range=[10,20];state=GPU;tracer_option=Default;profile_path=model.profile +flags:FLAGS_eager_delete_tensor_gb=0.0;FLAGS_fraction_of_gpu_memory_to_use=0.98;FLAGS_conv_workspace_size_limit=4096 +===========================infer_benchmark_params========================== +random_infer_input:[{float32,[3,224,224]}] \ No newline at end of file diff --git a/test_tipc/prepare.sh b/test_tipc/prepare.sh index aa1d44fda2bb71ca06f0cdd27bb4be18909f6b8b..a5a447588eac4bed1a8e7b2fa0f67a2e2029598c 100644 --- a/test_tipc/prepare.sh +++ b/test_tipc/prepare.sh @@ -141,7 +141,7 @@ model_name=$(func_parser_value "${lines[1]}") model_url_value=$(func_parser_value "${lines[35]}") model_url_key=$(func_parser_key "${lines[35]}") -if [[ $FILENAME == *GeneralRecognition* ]]; then +if [[ $model_name == *ShiTu* ]]; then cd dataset rm -rf Aliproduct rm -rf train_reg_all_data.txt @@ -176,22 +176,39 @@ if [[ ${MODE} = "lite_train_lite_infer" ]] || [[ ${MODE} = "lite_train_whole_inf mv val.txt val_list.txt cp -r train/* val/ cd ../../ -elif [[ ${MODE} = "whole_infer" ]] || [[ ${MODE} = "klquant_whole_infer" ]]; then +elif [[ ${MODE} = "whole_infer" ]]; then # download data - cd dataset - rm -rf ILSVRC2012 - wget -nc https://paddle-imagenet-models-name.bj.bcebos.com/data/whole_chain/whole_chain_infer.tar - tar xf whole_chain_infer.tar - ln -s whole_chain_infer ILSVRC2012 - cd ILSVRC2012 - mv val.txt val_list.txt - ln -s val_list.txt train_list.txt - cd ../../ + if [[ ${model_name} =~ "GeneralRecognition" ]]; then + cd dataset + rm -rf Aliproduct + rm -rf train_reg_all_data.txt + rm -rf demo_train + wget -nc https://paddle-imagenet-models-name.bj.bcebos.com/data/whole_chain/tipc_shitu_demo_data.tar --no-check-certificate + tar -xf tipc_shitu_demo_data.tar + ln -s
tipc_shitu_demo_data Aliproduct + ln -s tipc_shitu_demo_data/demo_train.txt train_reg_all_data.txt + ln -s tipc_shitu_demo_data/demo_train demo_train + cd tipc_shitu_demo_data + ln -s demo_test.txt val_list.txt + cd ../../ + else + cd dataset + rm -rf ILSVRC2012 + wget -nc https://paddle-imagenet-models-name.bj.bcebos.com/data/whole_chain/whole_chain_infer.tar + tar xf whole_chain_infer.tar + ln -s whole_chain_infer ILSVRC2012 + cd ILSVRC2012 + mv val.txt val_list.txt + ln -s val_list.txt train_list.txt + cd ../../ + fi # download inference or pretrained model eval "wget -nc $model_url_value" - if [[ $model_url_key == *inference* ]]; then - rm -rf inference - tar xf "${model_name}_infer.tar" + if [[ ${model_url_value} =~ ".tar" ]]; then + tar_name=$(func_get_url_file_name "${model_url_value}") + echo $tar_name + rm -rf ${tar_name} + tar xf ${tar_name} fi if [[ $model_name == "SwinTransformer_large_patch4_window7_224" || $model_name == "SwinTransformer_large_patch4_window12_384" ]]; then cmd="mv ${model_name}_22kto1k_pretrained.pdparams ${model_name}_pretrained.pdparams" diff --git a/test_tipc/test_train_inference_python.sh b/test_tipc/test_train_inference_python.sh index 427005cf0601e192d01264e39ed82ed77ab57d0d..da78357aea9534ad3fc255bdeae864d41f0b68d0 100644 --- a/test_tipc/test_train_inference_python.sh +++ b/test_tipc/test_train_inference_python.sh @@ -88,17 +88,17 @@ benchmark_value=$(func_parser_value "${lines[49]}") infer_key1=$(func_parser_key "${lines[50]}") infer_value1=$(func_parser_value "${lines[50]}") if [ ! $epoch_num ]; then - epoch_num=2 + epoch_num=2 fi if [[ $MODE = 'benchmark_train' ]]; then - epoch_num=1 + epoch_num=1 fi -LOG_PATH="./test_tipc/output/${model_name}" +LOG_PATH="./test_tipc/output/${model_name}/${MODE}" mkdir -p ${LOG_PATH} status_log="${LOG_PATH}/results_python.log" -function func_inference(){ +function func_inference() { IFS='|' _python=$1 _script=$2 @@ -110,9 +110,6 @@ function func_inference(){ for use_gpu in ${use_gpu_list[*]}; do if [ ${use_gpu} = "False" ] || [ ${use_gpu} = "cpu" ]; then for use_mkldnn in ${use_mkldnn_list[*]}; do - if [ ${use_mkldnn} = "False" ] && [ ${_flag_quant} = "True" ]; then - continue - fi for threads in ${cpu_threads_list[*]}; do for batch_size in ${batch_size_list[*]}; do _save_log_path="${_log_path}/infer_cpu_usemkldnn_${use_mkldnn}_threads_${threads}_batchsize_${batch_size}.log" @@ -136,9 +133,6 @@ function func_inference(){ if [ ${precision} = "True" ] && [ ${use_trt} = "False" ]; then continue fi - if [[ ${use_trt} = "False" || ${precision} =~ "int8" ]] && [ ${_flag_quant} = "True" ]; then - continue - fi for batch_size in ${batch_size_list[*]}; do _save_log_path="${_log_path}/infer_gpu_usetrt_${use_trt}_precision_${precision}_batchsize_${batch_size}.log" set_infer_data=$(func_set_params "${image_dir_key}" "${_img_dir}") @@ -161,51 +155,23 @@ function func_inference(){ done } -if [[ ${MODE} = "whole_infer" ]] || [[ ${MODE} = "klquant_whole_infer" ]]; then - IFS="|" - infer_export_flag=(${infer_export_flag}) - if [ ${infer_export_flag} != "null" ] && [ ${infer_export_flag} != "False" ]; then - rm -rf ${infer_model_dir_list/..\//} - export_cmd="${python} ${norm_export} -o Global.pretrained_model=${model_name}_pretrained -o Global.save_inference_dir=${infer_model_dir_list/..\//}" - eval $export_cmd - fi -fi if [[ ${MODE} = "whole_infer" ]]; then - GPUID=$3 - if [ ${#GPUID} -le 0 ];then - env=" " - else - env="export CUDA_VISIBLE_DEVICES=${GPUID}" - fi - # set CUDA_VISIBLE_DEVICES - eval $env - export Count=0 - cd deploy -
for infer_model in ${infer_model_dir_list[*]}; do - #run inference - is_quant=${infer_quant_flag[Count]} - echo "is_quant: ${is_quant}" - func_inference "${python}" "${inference_py}" "${infer_model}" "../${LOG_PATH}" "${infer_img_dir}" ${is_quant} - Count=$(($Count + 1)) - done - cd .. - -elif [[ ${MODE} = "klquant_whole_infer" ]]; then # for kl_quant if [ ${kl_quant_cmd_value} != "null" ] && [ ${kl_quant_cmd_value} != "False" ]; then - echo "kl_quant" - command="${python} ${kl_quant_cmd_value}" - eval $command - last_status=${PIPESTATUS[0]} - status_check $last_status "${command}" "${status_log}" "${model_name}" - cd inference/quant_post_static_model - ln -s __model__ inference.pdmodel - ln -s __params__ inference.pdiparams - cd ../../deploy - is_quant=True - func_inference "${python}" "${inference_py}" "${infer_model_dir_list}/quant_post_static_model" "../${LOG_PATH}" "${infer_img_dir}" ${is_quant} - cd .. + echo "kl_quant" + command="${python} ${kl_quant_cmd_value}" + echo ${command} + eval $command + last_status=${PIPESTATUS[0]} + status_check $last_status "${command}" "${status_log}" "${model_name}" + cd ${infer_model_dir_list}/quant_post_static_model + ln -s __model__ inference.pdmodel + ln -s __params__ inference.pdiparams + cd ../../deploy + is_quant=True + func_inference "${python}" "${inference_py}" "../${infer_model_dir_list}/quant_post_static_model" "../${LOG_PATH}" "${infer_img_dir}" ${is_quant} + cd .. fi else IFS="|" @@ -215,12 +181,12 @@ else train_use_gpu=${USE_GPU_KEY[Count]} Count=$(($Count + 1)) ips="" - if [ ${gpu} = "-1" ];then + if [ ${gpu} = "-1" ]; then env="" - elif [ ${#gpu} -le 1 ];then + elif [ ${#gpu} -le 1 ]; then env="export CUDA_VISIBLE_DEVICES=${gpu}" eval ${env} - elif [ ${#gpu} -le 15 ];then + elif [ ${#gpu} -le 15 ]; then IFS="," array=(${gpu}) env="export CUDA_VISIBLE_DEVICES=${array[0]}" @@ -270,7 +236,7 @@ else set_batchsize=$(func_set_params "${train_batch_key}" "${train_batch_value}") set_train_params1=$(func_set_params "${train_param_key1}" "${train_param_value1}") set_use_gpu=$(func_set_params "${train_use_gpu_key}" "${train_use_gpu_value}") - if [ ${#ips} -le 15 ];then + if [ ${#ips} -le 15 ]; then # if length of ips >= 15, then it is seen as multi-machine # 15 is the min length of ips info for multi-machine: 0.0.0.0,0.0.0.0 save_log="${LOG_PATH}/${trainer}_gpus_${gpu}_autocast_${autocast}" @@ -289,26 +255,26 @@ else # fi set_save_model=$(func_set_params "${save_model_key}" "${save_log}") - if [ ${#gpu} -le 2 ];then # train with cpu or single gpu + if [ ${#gpu} -le 2 ]; then # train with cpu or single gpu cmd="${python} ${run_train} ${set_use_gpu} ${set_save_model} ${set_epoch} ${set_pretrain} ${set_autocast} ${set_batchsize} ${set_train_params1} " - elif [ ${#ips} -le 15 ];then # train with multi-gpu + elif [ ${#ips} -le 15 ]; then # train with multi-gpu cmd="${python} -m paddle.distributed.launch --gpus=${gpu} ${run_train} ${set_use_gpu} ${set_save_model} ${set_epoch} ${set_pretrain} ${set_autocast} ${set_batchsize} ${set_train_params1}" - else # train with multi-machine + else # train with multi-machine cmd="${python} -m paddle.distributed.launch --ips=${ips} --gpus=${gpu} ${run_train} ${set_use_gpu} ${set_save_model} ${set_pretrain} ${set_epoch} ${set_autocast} ${set_batchsize} ${set_train_params1}" fi # run train - eval "unset CUDA_VISIBLE_DEVICES" - # export FLAGS_cudnn_deterministic=True - sleep 5 + eval "unset CUDA_VISIBLE_DEVICES" + # export FLAGS_cudnn_deterministic=True + sleep 5 eval $cmd status_check $? 
"${cmd}" "${status_log}" "${model_name}" sleep 5 - if [[ $FILENAME == *GeneralRecognition* ]]; then - set_eval_pretrain=$(func_set_params "${pretrain_model_key}" "${save_log}/RecModel/${train_model_name}") - else + if [[ $FILENAME == *GeneralRecognition* ]]; then + set_eval_pretrain=$(func_set_params "${pretrain_model_key}" "${save_log}/RecModel/${train_model_name}") + else set_eval_pretrain=$(func_set_params "${pretrain_model_key}" "${save_log}/${model_name}/${train_model_name}") - fi + fi # save norm trained models to set pretrain for pact training and fpgm training if [ ${trainer} = ${trainer_norm} ]; then load_norm_train_model=${set_eval_pretrain} @@ -325,11 +291,11 @@ else if [ ${run_export} != "null" ]; then # run export model save_infer_path="${save_log}" - if [[ $FILENAME == *GeneralRecognition* ]]; then - set_eval_pretrain=$(func_set_params "${pretrain_model_key}" "${save_log}/RecModel/${train_model_name}") - else - set_export_weight=$(func_set_params "${export_weight}" "${save_log}/${model_name}/${train_model_name}") - fi + if [[ $FILENAME == *GeneralRecognition* ]]; then + set_eval_pretrain=$(func_set_params "${pretrain_model_key}" "${save_log}/RecModel/${train_model_name}") + else + set_export_weight=$(func_set_params "${export_weight}" "${save_log}/${model_name}/${train_model_name}") + fi set_save_infer_key=$(func_set_params "${save_infer_key}" "${save_infer_path}") export_cmd="${python} ${run_export} ${set_export_weight} ${set_save_infer_key}" eval $export_cmd @@ -338,12 +304,12 @@ else #run inference eval $env save_infer_path="${save_log}" - cd deploy + cd deploy func_inference "${python}" "${inference_py}" "../${save_infer_path}" "../${LOG_PATH}" "${infer_img_dir}" "${flag_quant}" - cd .. + cd .. fi eval "unset CUDA_VISIBLE_DEVICES" - done # done with: for trainer in ${trainer_list[*]}; do - done # done with: for autocast in ${autocast_list[*]}; do - done # done with: for gpu in ${gpu_list[*]}; do -fi # end if [ ${MODE} = "infer" ]; then + done # done with: for trainer in ${trainer_list[*]}; do + done # done with: for autocast in ${autocast_list[*]}; do + done # done with: for gpu in ${gpu_list[*]}; do +fi # end if [ ${MODE} = "infer" ]; then