Unverified · Commit b477705c, authored by Walter, committed by GitHub

Merge pull request #1224 from RainFrost1/slim_chain

update slim for whole_chain

Route the slim (PACT quantization and FPGM pruning) whole-chain tests through tools/train.py and tools/export_model.py instead of deploy/slim/slim.py, fix the "quantalization" config-path typo, add cpp_infer parameters, and pass the configured train_use_gpu_value in the test script.
@@ -12,10 +12,10 @@ train_model_name:latest
 train_infer_img_dir:./dataset/ILSVRC2012/val
 null:null
 ##
-trainer:norm_train
+trainer:norm_train|pact_train|fpgm_train
 norm_train:tools/train.py -c ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_large_x1_0.yaml
-pact_train:deploy/slim/slim.py -c ppcls/configs/slim/MobileNetV3_large_x1_0_quantization.yaml
-fpgm_train:deploy/slim/slim.py -c ppcls/configs/slim/MobileNetV3_large_x1_0_prune.yaml
+pact_train:tools/train.py -c ppcls/configs/slim/MobileNetV3_large_x1_0_quantization.yaml
+fpgm_train:tools/train.py -c ppcls/configs/slim/MobileNetV3_large_x1_0_prune.yaml
 distill_train:null
 null:null
 null:null
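For context, the test harness composes a training command from each entry above. A minimal sketch of what the corrected `pact_train` line is expected to expand to (the `-o` overrides are illustrative assumptions, not part of this diff):

```bash
# Minimal sketch, assuming the harness prepends "python" and appends -o
# overrides; the epoch/output values below are illustrative, not from the diff.
python tools/train.py \
    -c ppcls/configs/slim/MobileNetV3_large_x1_0_quantization.yaml \
    -o Global.epochs=2 \
    -o Global.output_dir=./output/
```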
@@ -28,8 +28,8 @@ null:null
 -o Global.save_inference_dir:./inference
 -o Global.pretrained_model:
 norm_export:tools/export_model.py -c ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_large_x1_0.yaml
-quant_export:deploy/slim/slim.py -m export -c ppcls/configs/slim/MobileNetV3_large_x1_0_quantalization.yaml
-fpgm_export:deploy/slim/slim.py -m export -c ppcls/configs/slim/MobileNetV3_large_x1_0_prune.yaml
+quant_export:tools/export_model.py -c ppcls/configs/slim/MobileNetV3_large_x1_0_quantization.yaml
+fpgm_export:tools/export_model.py -c ppcls/configs/slim/MobileNetV3_large_x1_0_prune.yaml
 distill_export:null
 export1:null
 export2:null
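The export entries likewise move from `deploy/slim/slim.py -m export` to the standard `tools/export_model.py`, reusing the `Global.pretrained_model` and `Global.save_inference_dir` keys shown in the context lines. A hedged sketch of the resulting `quant_export` command (the checkpoint path is an assumption built from `train_model_name:latest`):

```bash
# Hedged sketch of the corrected quant_export invocation; the checkpoint
# path is an illustrative assumption.
python tools/export_model.py \
    -c ppcls/configs/slim/MobileNetV3_large_x1_0_quantization.yaml \
    -o Global.pretrained_model=./output/MobileNetV3_large_x1_0/latest \
    -o Global.save_inference_dir=./inference
```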
@@ -49,3 +49,10 @@ inference:python/predict_cls.py -c configs/inference_cls.yaml
 -o Global.save_log_path:null
 -o Global.benchmark:True
 null:null
+null:null
+===========================cpp_infer_params===========================
+use_gpu:0|1
+cpu_threads:1|6
+use_mkldnn:0|1
+use_tensorrt:0|1
+use_fp16:0|1
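Each new `cpp_infer_params` value uses `|` to enumerate the variants the C++ inference test is meant to sweep. A minimal bash sketch of how such pipe-separated values can be split and iterated (the loop body is a placeholder, not the real harness):

```bash
# Minimal sketch: split '|'-separated TIPC values and sweep the combinations.
IFS='|' read -ra GPU_MODES <<< "0|1"
IFS='|' read -ra CPU_THREADS <<< "1|6"
for use_gpu in "${GPU_MODES[@]}"; do
    for threads in "${CPU_THREADS[@]}"; do
        # Placeholder: the real harness would launch the C++ predictor here.
        echo "cpp_infer: use_gpu=${use_gpu} cpu_threads=${threads}"
    done
done
```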
@@ -12,10 +12,10 @@ train_model_name:latest
 train_infer_img_dir:./dataset/ILSVRC2012/val
 null:null
 ##
-trainer:norm_train
+trainer:norm_train|pact_train|fpgm_train
 norm_train:tools/train.py -c ppcls/configs/ImageNet/ResNet/ResNet50_vd.yaml
-pact_train:deploy/slim/slim.py -c ppcls/configs/slim/ResNet50_vd_quantization.yaml
-fpgm_train:deploy/slim/slim.py -c ppcls/configs/slim/ResNet50_vd_prune.yaml
+pact_train:tools/train.py -c ppcls/configs/slim/ResNet50_vd_quantization.yaml
+fpgm_train:tools/train.py -c ppcls/configs/slim/ResNet50_vd_prune.yaml
 distill_train:null
 null:null
 null:null
@@ -28,8 +28,8 @@ null:null
 -o Global.save_inference_dir:./inference
 -o Global.pretrained_model:
 norm_export:tools/export_model.py -c ppcls/configs/ImageNet/ResNet/ResNet50_vd.yaml
-quant_export:deploy/slim/slim.py -m export -c ppcls/configs/slim/ResNet50_vd_quantalization.yaml
-fpgm_export:deploy/slim/slim.py -m export -c ppcls/configs/slim/ResNet50_vd_prune.yaml
+quant_export:tools/export_model.py -c ppcls/configs/slim/ResNet50_vd_quantization.yaml
+fpgm_export:tools/export_model.py -c ppcls/configs/slim/ResNet50_vd_prune.yaml
 distill_export:null
 export1:null
 export2:null
......
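The ResNet50_vd file mirrors the MobileNetV3 changes; only the config paths differ. For the pruning path, the corrected `fpgm_export` entry would compose a command like this sketch (the checkpoint path is again an illustrative assumption):

```bash
# Hedged sketch of the corrected fpgm_export invocation for ResNet50_vd.
python tools/export_model.py \
    -c ppcls/configs/slim/ResNet50_vd_prune.yaml \
    -o Global.pretrained_model=./output/ResNet50_vd/latest \
    -o Global.save_inference_dir=./inference
```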
@@ -380,7 +380,7 @@ else
 set_pretrain=$(func_set_params "${pretrain_model_key}" "${pretrain_model_value}")
 set_batchsize=$(func_set_params "${train_batch_key}" "${train_batch_value}")
 set_train_params1=$(func_set_params "${train_param_key1}" "${train_param_value1}")
-set_use_gpu=$(func_set_params "${train_use_gpu_key}" "${use_gpu}")
+set_use_gpu=$(func_set_params "${train_use_gpu_key}" "${train_use_gpu_value}")
 save_log="${LOG_PATH}/${trainer}_gpus_${gpu}_autocast_${autocast}"
 # load pretrain from norm training if current trainer is pact or fpgm trainer
......
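This fix matters because `func_set_params` builds the `key=value` fragment appended to the training command: the old line passed the loop-level `${use_gpu}` variable, while the intended value is `${train_use_gpu_value}` parsed from the config file. A minimal sketch of a `func_set_params`-style helper consistent with these call sites (the repo's actual definition may handle nulls differently):

```bash
# Minimal sketch of a func_set_params-style helper, assumed from its call
# sites above; the real helper is defined in the test script's common utils.
function func_set_params(){
    key=$1
    value=$2
    # Emit a blank when either side is "null" or empty, so the composed
    # command simply omits this option.
    if [ "${key}" = "null" ] || [ "${value}" = "null" ] || [ -z "${value}" ]; then
        echo " "
    else
        echo "${key}=${value}"
    fi
}
```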