Unverified commit 793b1858, authored by Kaipeng Deng, committed by GitHub

add prune/eval.py and update model zoo (#207)

* add prune/eval.py and update model zoo
Parent 7eadb565
......@@ -37,6 +37,10 @@
| Backbone | Pruning strategy | Input size | Box AP | Download |
| :----------------| :-------: | :------: |:------: | :-----------------------------------------------------: |
| ResNet50-vd-dcn | sensitivity | 320 | 39.8 | [Download link](https://paddlemodels.bj.bcebos.com/PaddleSlim/prune/yolov3_r50_dcn_prune1x.tar) |
| ResNet50-vd-dcn | sensitivity | 320 | 38.3 | [Download link](https://paddlemodels.bj.bcebos.com/PaddleSlim/prune/yolov3_r50_dcn_prune578.tar) |
| MobileNetV1 | sensitivity | 608 | 30.2 | [Download link](https://paddlemodels.bj.bcebos.com/PaddleSlim/prune/yolov3_mobilenet_v1_prune1x.tar) |
| MobileNetV1 | sensitivity | 416 | 29.7 | [Download link](https://paddlemodels.bj.bcebos.com/PaddleSlim/prune/yolov3_mobilenet_v1_prune1x.tar) |
| MobileNetV1 | sensitivity | 320 | 27.2 | [Download link](https://paddlemodels.bj.bcebos.com/PaddleSlim/prune/yolov3_mobilenet_v1_prune1x.tar) |
| MobileNetV1 | r578 | 608 | 27.8 | [Download link](https://paddlemodels.bj.bcebos.com/PaddleSlim/prune/yolov3_mobilenet_v1_prune578.tar) |
| MobileNetV1 | r578 | 416 | 26.8 | [Download link](https://paddlemodels.bj.bcebos.com/PaddleSlim/prune/yolov3_mobilenet_v1_prune578.tar) |
| MobileNetV1 | r578 | 320 | 24.0 | [Download link](https://paddlemodels.bj.bcebos.com/PaddleSlim/prune/yolov3_mobilenet_v1_prune578.tar) |
......
......@@ -58,7 +58,19 @@ python prune.py \
  --pruned_ratios="0.2,0.3,0.4"
```
## 5. Extending the model
## 5. Evaluating the pruned model
After the pruning training task finishes, the accuracy of the pruned model can be evaluated with `eval.py`. Use `--pruned_params` and `--pruned_ratios` to specify the list of pruned parameter names and the pruning ratio of each parameter; these must match the values used during pruning training.
```
python eval.py \
-c ../../configs/yolov3_mobilenet_v1_voc.yml \
--pruned_params "yolo_block.0.0.0.conv.weights,yolo_block.0.0.1.conv.weights,yolo_block.0.1.0.conv.weights" \
--pruned_ratios="0.2,0.3,0.4" \
-o weights=output/yolov3_mobilenet_v1_voc/model_final
```
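Note that `--pruned_params` must name parameters that actually exist in the graph. As shown later in this diff, `prune.py` exposes a `-P`/`--print_params` flag that prints the name and shape of every parameter in the current graph, e.g. `python prune.py -c ../../configs/yolov3_mobilenet_v1_voc.yml --print_params`, which is one way to list candidate names.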
## 6. Extending the model
To apply pruning to your own model, refer to the way `prune.py` calls the `paddleslim.prune.Pruner` interface and adapt your own model training script accordingly; a minimal sketch of the call follows.
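As a rough orientation only, the call reduces to the sketch below, mirroring the invocations in `prune.py`/`eval.py` in this commit. `train_prog` and the parameter name `"conv1_weights"` are placeholders for your own program and parameters; the surrounding training-script plumbing is omitted.

```
import paddle.fluid as fluid
from paddleslim.prune import Pruner

# `train_prog` is assumed to be the fluid.Program built by your own training
# script, with parameters already initialized in fluid.global_scope().
pruner = Pruner()
train_prog, _, _ = pruner.prune(
    train_prog,
    fluid.global_scope(),        # scope holding the parameter tensors
    params=["conv1_weights"],    # placeholder; use your real parameter names
    ratios=[0.3],                # prune 30% of the filters of each listed parameter
    place=fluid.CUDAPlace(0),
    only_graph=False)            # False: rewrite both the graph and the weights
```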
The pruning example in this section requires the user to specify the pruning ratio of each layer from prior knowledge. Beyond that, PaddleSlim also provides utilities such as sensitivity analysis to help choose appropriate pruning ratios; a sketch follows below. For more details, see the [PaddleSlim documentation](https://paddlepaddle.github.io/PaddleSlim/).
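For reference, a sensitivity run looks roughly like the sketch below. It uses the `paddleslim.prune.sensitivity` interface described in the PaddleSlim documentation; the exact signature may differ between PaddleSlim versions, and `eval_func` is a user-supplied callback, so treat this as an assumption to verify against the linked docs.

```
from paddleslim.prune import sensitivity

def eval_func(program):
    # placeholder: run your evaluation loop on `program` and return a
    # scalar accuracy score such as mAP
    return 0.0

sens = sensitivity(
    eval_prog,                                # program to analyze
    place,                                    # device the program runs on
    ["yolo_block.0.0.0.conv.weights"],        # parameter names to analyze
    eval_func,
    sensitivities_file="sensitivities.data",  # partial results are cached here
    pruned_ratios=[0.1, 0.2, 0.3])            # ratios tried for each parameter
```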
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
def set_paddle_flags(**kwargs):
    for key, value in kwargs.items():
        if os.environ.get(key, None) is None:
            os.environ[key] = str(value)


# NOTE(paddle-dev): All of these flags should be set before
# `import paddle`. Otherwise, it would not take any effect.
set_paddle_flags(
    FLAGS_eager_delete_tensor_gb=0,  # enable GC to save memory
)
import paddle.fluid as fluid
from paddleslim.prune import Pruner
from paddleslim.analysis import flops
from ppdet.utils.eval_utils import parse_fetches, eval_run, eval_results, json_eval_results
import ppdet.utils.checkpoint as checkpoint
from ppdet.utils.check import check_gpu, check_version
from ppdet.data.reader import create_reader
from ppdet.core.workspace import load_config, merge_config, create
from ppdet.utils.cli import ArgsParser
import logging
FORMAT = '%(asctime)s-%(levelname)s: %(message)s'
logging.basicConfig(level=logging.INFO, format=FORMAT)
logger = logging.getLogger(__name__)
def main():
    """
    Main evaluate function
    """
    cfg = load_config(FLAGS.config)
    if 'architecture' in cfg:
        main_arch = cfg.architecture
    else:
        raise ValueError("'architecture' not specified in config file.")

    merge_config(FLAGS.opt)

    # check if set use_gpu=True in paddlepaddle cpu version
    check_gpu(cfg.use_gpu)
    # check if paddlepaddle version is satisfied
    check_version()

    multi_scale_test = getattr(cfg, 'MultiScaleTEST', None)

    # define executor
    place = fluid.CUDAPlace(0) if cfg.use_gpu else fluid.CPUPlace()
    exe = fluid.Executor(place)

    # build program
    model = create(main_arch)
    startup_prog = fluid.Program()
    eval_prog = fluid.Program()
    with fluid.program_guard(eval_prog, startup_prog):
        with fluid.unique_name.guard():
            inputs_def = cfg['EvalReader']['inputs_def']
            feed_vars, loader = model.build_inputs(**inputs_def)
            if multi_scale_test is None:
                fetches = model.eval(feed_vars)
            else:
                fetches = model.eval(feed_vars, multi_scale_test)
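    # clone(True) is clone(for_test=True): strip training-only ops so the
    # program is a pure inference graph before it is pruned and evaluated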
    eval_prog = eval_prog.clone(True)
    reader = create_reader(cfg.EvalReader)
    loader.set_sample_list_generator(reader, place)

    dataset = cfg['EvalReader']['dataset']

    # evaluate existing JSON result files directly
    if FLAGS.json_eval:
        logger.info(
            "In json_eval mode, PaddleDetection will evaluate json files in "
            "output_eval directly. And proposal.json, bbox.json and mask.json "
            "will be detected by default.")
        json_eval_results(
            cfg.metric, json_directory=FLAGS.output_eval, dataset=dataset)
        return
    assert FLAGS.pruned_params is not None, \
        "FLAGS.pruned_params is empty!!! Please set it by '--pruned_params' option."
    pruned_params = FLAGS.pruned_params.strip().split(",")
    logger.info("pruned params: {}".format(pruned_params))

    assert FLAGS.pruned_ratios is not None, \
        "FLAGS.pruned_ratios is empty!!! Please set it by '--pruned_ratios' option."
    pruned_ratios = [float(n) for n in FLAGS.pruned_ratios.strip().split(",")]
    logger.info("pruned ratios: {}".format(pruned_ratios))

    assert len(pruned_params) == len(pruned_ratios), \
        "The length of pruned params and pruned ratios should be equal."
    assert all(0 < r < 1 for r in pruned_ratios), \
        "The elements of pruned ratios should be in range (0, 1)."
    base_flops = flops(eval_prog)
    pruner = Pruner()
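    # only_graph=True: only reshape the graph here; the pruned weight values
    # themselves are loaded from the trained checkpoint below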
    eval_prog, _, _ = pruner.prune(
        eval_prog,
        fluid.global_scope(),
        params=pruned_params,
        ratios=pruned_ratios,
        place=place,
        only_graph=True)
    pruned_flops = flops(eval_prog)
    logger.info("pruned FLOPS: {}".format(
        float(base_flops - pruned_flops) / base_flops))

    compile_program = fluid.compiler.CompiledProgram(
        eval_prog).with_data_parallel()

    assert cfg.metric != 'OID', "eval process of OID dataset is not supported."
    if cfg.metric == "WIDERFACE":
        raise ValueError("metric type {} is not supported in tools/eval.py, "
                         "please use tools/face_eval.py".format(cfg.metric))
    assert cfg.metric in ['COCO', 'VOC'], \
        "unknown metric type {}".format(cfg.metric)
    extra_keys = []

    if cfg.metric == 'COCO':
        extra_keys = ['im_info', 'im_id', 'im_shape']
    if cfg.metric == 'VOC':
        extra_keys = ['gt_bbox', 'gt_class', 'is_difficult']

    keys, values, cls = parse_fetches(fetches, eval_prog, extra_keys)

    # whether output bbox is normalized in model output layer
    is_bbox_normalized = False
    if hasattr(model, 'is_bbox_normalized') and \
            callable(model.is_bbox_normalized):
        is_bbox_normalized = model.is_bbox_normalized()

    sub_eval_prog = None
    sub_keys = None
    sub_values = None
    # build sub-program
    if 'Mask' in main_arch and multi_scale_test:
        sub_eval_prog = fluid.Program()
        with fluid.program_guard(sub_eval_prog, startup_prog):
            with fluid.unique_name.guard():
                inputs_def = cfg['EvalReader']['inputs_def']
                inputs_def['mask_branch'] = True
                feed_vars, eval_loader = model.build_inputs(**inputs_def)
                sub_fetches = model.eval(
                    feed_vars, multi_scale_test, mask_branch=True)
                assert cfg.metric == 'COCO'
                extra_keys = ['im_id', 'im_shape']
        sub_keys, sub_values, _ = parse_fetches(sub_fetches, sub_eval_prog,
                                                extra_keys)
        sub_eval_prog = sub_eval_prog.clone(True)

    # load model
    exe.run(startup_prog)
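    # cfg.weights should point at the checkpoint saved by the pruning training
    # run; its tensors already have the pruned shapes produced above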
    if 'weights' in cfg:
        checkpoint.load_params(exe, eval_prog, cfg.weights)

    results = eval_run(exe, compile_program, loader, keys, values, cls, cfg,
                       sub_eval_prog, sub_keys, sub_values)

    # evaluation
    resolution = None
    if 'mask' in results[0]:
        resolution = model.mask_head.resolution
    # if map_type is not set, use the default '11point'; only used in VOC eval
    map_type = cfg.map_type if 'map_type' in cfg else '11point'
    eval_results(
        results,
        cfg.metric,
        cfg.num_classes,
        resolution,
        is_bbox_normalized,
        FLAGS.output_eval,
        map_type,
        dataset=dataset)

if __name__ == '__main__':
    parser = ArgsParser()
    parser.add_argument(
        "--json_eval",
        action='store_true',
        default=False,
        help="Whether to re-evaluate with existing bbox.json or mask.json files.")
    parser.add_argument(
        "-f",
        "--output_eval",
        default=None,
        type=str,
        help="Evaluation file directory, default is current directory.")
    parser.add_argument(
        "-p",
        "--pruned_params",
        default=None,
        type=str,
        help="The names of the parameters to be pruned, separated by commas.")
    parser.add_argument(
        "--pruned_ratios",
        default=None,
        type=str,
        help="The pruning ratio for each parameter, separated by commas.")
    FLAGS = parser.parse_args()
    main()
......@@ -115,11 +115,13 @@ def main():
train_values.append(lr)
if FLAGS.print_params:
print("-------------------------All parameters in current graph----------------------")
param_delimit_str = '-' * 20 + "All parameters in current graph" + '-' * 20
print(param_delimit_str)
for block in train_prog.blocks:
for param in block.all_parameters():
print("parameter name: {}\tshape: {}".format(param.name, param.shape))
print("------------------------------------------------------------------------------")
print("parameter name: {}\tshape: {}".format(param.name,
param.shape))
print('-' * len(param_delimit_str))
return
if FLAGS.eval:
......@@ -174,19 +176,20 @@ def main():
checkpoint.load_checkpoint(exe, train_prog, FLAGS.resume_checkpoint)
start_iter = checkpoint.global_step()
elif cfg.pretrain_weights:
checkpoint.load_params(
exe, train_prog, cfg.pretrain_weights)
checkpoint.load_params(exe, train_prog, cfg.pretrain_weights)
pruned_params = FLAGS.pruned_params
assert (FLAGS.pruned_params is not None), "FLAGS.pruned_params is empty!!! Please set it by '--pruned_params' option."
assert FLAGS.pruned_params is not None, \
"FLAGS.pruned_params is empty!!! Please set it by '--pruned_params' option."
pruned_params = FLAGS.pruned_params.strip().split(",")
logger.info("pruned params: {}".format(pruned_params))
pruned_ratios = [float(n) for n in FLAGS.pruned_ratios.strip().split(" ")]
pruned_ratios = [float(n) for n in FLAGS.pruned_ratios.strip().split(",")]
logger.info("pruned ratios: {}".format(pruned_ratios))
assert(len(pruned_params) == len(pruned_ratios)), "The length of pruned params and pruned ratios should be equal."
assert(pruned_ratios > [0] * len(pruned_ratios) and pruned_ratios < [1] * len(pruned_ratios)), "The elements of pruned ratios should be in range (0, 1)."
assert len(pruned_params) == len(pruned_ratios), \
"The length of pruned params and pruned ratios should be equal."
assert all(0 < r < 1 for r in pruned_ratios), \
"The elements of pruned ratios should be in range (0, 1)."
pruner = Pruner()
train_prog = pruner.prune(
......@@ -213,11 +216,11 @@ def main():
place=place,
only_graph=True)[0]
pruned_flops = flops(eval_prog)
logger.info("FLOPs -{}; total FLOPs: {}; pruned FLOPs: {}".format(float(base_flops - pruned_flops)/base_flops, base_flops, pruned_flops))
logger.info("FLOPs -{}; total FLOPs: {}; pruned FLOPs: {}".format(
float(base_flops - pruned_flops) / base_flops, base_flops,
pruned_flops))
compiled_eval_prog = fluid.compiler.CompiledProgram(eval_prog)
train_reader = create_reader(cfg.TrainReader, (cfg.max_iters - start_iter) *
devices_num, cfg)
train_loader.set_sample_list_generator(train_reader, place)
......@@ -248,12 +251,10 @@ def main():
tb_loss_step = 0
tb_mAP_step = 0
if FLAGS.eval:
# evaluation
results = eval_run(exe, compiled_eval_prog, eval_loader,
eval_keys, eval_values, eval_cls)
results = eval_run(exe, compiled_eval_prog, eval_loader, eval_keys,
eval_values, eval_cls)
resolution = None
if 'mask' in results[0]:
resolution = model.mask_head.resolution
......@@ -268,8 +269,6 @@ def main():
map_type,
dataset=dataset)
for it in range(start_iter, cfg.max_iters):
start_time = end_time
end_time = time.time()
......@@ -373,9 +372,10 @@ if __name__ == '__main__':
help="The parameters to be pruned when calculating sensitivities.")
parser.add_argument(
"--pruned_ratios",
default="0.1 0.2 0.3 0.4 0.5 0.6 0.7 0.8 0.9",
default=None,
type=str,
help="The ratios pruned iteratively for each parameter when calculating sensitivities.")
help="The ratios pruned iteratively for each parameter when calculating sensitivities."
)
parser.add_argument(
"-P",
"--print_params",
......