From bb6d5356874862904e61f786201be8361a126825 Mon Sep 17 00:00:00 2001
From: Liufang Sang
Date: Fri, 11 Oct 2019 16:22:14 +0800
Subject: [PATCH] Fix params filename, pretrain model path (#3513)

---
 slim/quantization/README.md                   | 6 +++---
 slim/quantization/freeze.py                   | 6 +++---
 slim/quantization/yolov3_mobilenet_v1_voc.yml | 2 +-
 3 files changed, 7 insertions(+), 7 deletions(-)

diff --git a/slim/quantization/README.md b/slim/quantization/README.md
index b044778e1..8136f29f3 100644
--- a/slim/quantization/README.md
+++ b/slim/quantization/README.md
@@ -121,17 +121,17 @@ QuantizationFreezePass is mainly used to change the order of the quantize and dequantize ops in the IrGraph
 python eval.py --model_path ${checkpoint_path}/${epoch_id}/eval_model/ --model_name __model__ --params_name __params__ -c yolov3_mobilenet_v1_voc.yml
 ```
 
-After evaluation, select the model of the best-performing epoch and use the script slim/quantization/freeze.py to convert it into the three models introduced above: the float model, the int8 model, and the mobile model. The parameters to configure are:
+After evaluation, select the model of the best-performing epoch and use the script slim/quantization/freeze.py to convert it into the three models introduced above: the FP32 model, the int8 model, and the mobile model. The parameters to configure are:
 
 - model_path: the path of the model to load, which is `${checkpoint_path}/${epoch_id}/eval_model/`
 - weight_quant_type: the quantization method of the model weights; keep it consistent with the type set in the config file
 - save_path: the save paths of the `FP32`, `8-bit`, and `mobile` models, which are `${save_path}/float/`, `${save_path}/int8/`, and `${save_path}/mobile/` respectively
 
 ### Final evaluation model
 
-The model used for the final evaluation is the float model; the script slim/quantization/eval.py gives an example of evaluating this model on the evaluation dataset.
+The model used for the final evaluation is the FP32 model; the script slim/quantization/eval.py gives an example of evaluating this model on the evaluation dataset.
 The command to run is:
 ```
-python eval.py --model_path ${float_model_path} --model_name model --params_name params -c yolov3_mobilenet_v1_voc.yml
+python eval.py --model_path ${float_model_path} --model_name model --params_name weights -c yolov3_mobilenet_v1_voc.yml
 ```
 
 ## Inference
diff --git a/slim/quantization/freeze.py b/slim/quantization/freeze.py
index f9785a080..e3999c99c 100644
--- a/slim/quantization/freeze.py
+++ b/slim/quantization/freeze.py
@@ -175,7 +175,7 @@ def main():
         executor=exe,
         main_program=server_program,
         model_filename='model',
-        params_filename='params')
+        params_filename='weights')
 
     logger.info("convert the weights into int8 type")
     convert_int8_pass = ConvertToInt8Pass(
@@ -190,7 +190,7 @@
         executor=exe,
         main_program=server_int8_program,
         model_filename='model',
-        params_filename='params')
+        params_filename='weights')
 
     logger.info("convert the freezed pass to paddle-lite execution")
     mobile_pass = TransformForMobilePass()
@@ -203,7 +203,7 @@
         executor=exe,
         main_program=mobile_program,
         model_filename='model',
-        params_filename='params')
+        params_filename='weights')
diff --git a/slim/quantization/yolov3_mobilenet_v1_voc.yml b/slim/quantization/yolov3_mobilenet_v1_voc.yml
index 2c7c8060a..a006ce775 100644
--- a/slim/quantization/yolov3_mobilenet_v1_voc.yml
+++ b/slim/quantization/yolov3_mobilenet_v1_voc.yml
@@ -9,7 +9,7 @@ save_dir: output
 snapshot_iter: 2000
 metric: VOC
 map_type: 11point
-pretrain_weights: http://paddle-imagenet-models-name.bj.bcebos.com/MobileNetV1_pretrained.tar
+pretrain_weights: https://paddlemodels.bj.bcebos.com/object_detection/yolov3_mobilenet_v1_voc.tar
 weights: output/yolov3_mobilenet_v1_voc/model_final
 num_classes: 20
-- 
GitLab
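
Note: the patched README lists `model_path`, `weight_quant_type`, and `save_path` as the parameters freeze.py needs, but it does not show a full command line. Below is a minimal sketch of the intended flow under that reading; the freeze.py flag spellings and the `abs_max` value are assumptions inferred from the README's parameter list and the eval.py command style (not taken from this patch), so check `python freeze.py --help` and the config file before running. Only the final eval.py command follows the patched README verbatim, with `${float_model_path}` expanded to `${save_path}/float/`.

```
# Convert the best epoch's quantized checkpoint into FP32 / int8 / mobile models.
# Hypothetical flag names and abs_max value; verify against freeze.py's argument parser.
python freeze.py --model_path ${checkpoint_path}/${epoch_id}/eval_model/ \
                 --weight_quant_type abs_max \
                 --save_path ${save_path}

# Evaluate the exported FP32 model; after this patch the params file is named 'weights'.
python eval.py --model_path ${save_path}/float/ --model_name model --params_name weights -c yolov3_mobilenet_v1_voc.yml
```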