Commit c2d68974, authored by: Liufang Sang; committed by: Bai Yifan

[PaddleSlim] fix details in code format (#3766)

* fix details test=release/1.6

* fix code format  test=release/1.6
Parent a8cf62b1
@@ -32,11 +32,13 @@ from paddle.fluid.contrib.slim.quantization import QuantizationFreezePass
 from paddle.fluid.contrib.slim.quantization import ConvertToInt8Pass
 from paddle.fluid.contrib.slim.quantization import TransformForMobilePass
 
+
 def set_paddle_flags(**kwargs):
     for key, value in kwargs.items():
         if os.environ.get(key, None) is None:
             os.environ[key] = str(value)
 
+
 # NOTE(paddle-dev): All of these flags should be set before
 # `import paddle`. Otherwise, it would not take any effect.
 set_paddle_flags(
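Note: the NOTE comment above is why these scripts define set_paddle_flags before importing Paddle; the FLAGS_* values are read from the environment once, at import time. A minimal runnable sketch of the pattern (the specific flag is illustrative, not part of this commit):

    import os

    def set_paddle_flags(**kwargs):
        # Only set a flag if the user has not already exported it.
        for key, value in kwargs.items():
            if os.environ.get(key, None) is None:
                os.environ[key] = str(value)

    # Must run before `import paddle`: flags are read once at import time.
    # FLAGS_eager_delete_tensor_gb=0 enables eager tensor GC (illustrative flag).
    set_paddle_flags(FLAGS_eager_delete_tensor_gb=0)

    import paddle.fluid as fluid  # noqa: E402, intentionally after flag setup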
@@ -59,6 +61,8 @@ import logging
 FORMAT = '%(asctime)s-%(levelname)s: %(message)s'
 logging.basicConfig(level=logging.INFO, format=FORMAT)
 logger = logging.getLogger(__name__)
+
+
 def eval_run(exe, compile_program, reader, keys, values, cls, test_feed):
     """
     Run evaluation program, return program outputs.
@@ -71,8 +75,7 @@ def eval_run(exe, compile_program, reader, keys, values, cls, test_feed):
     has_bbox = 'bbox' in keys
     for data in reader():
         data = test_feed.feed(data)
-        feed_data = {'image': data['image'],
-                     'im_size': data['im_size']}
+        feed_data = {'image': data['image'], 'im_size': data['im_size']}
         outs = exe.run(compile_program,
                        feed=feed_data,
                        fetch_list=values[0],
@@ -123,7 +126,6 @@ def main():
         devices_num = int(
             os.environ.get('CPU_NUM', multiprocessing.cpu_count()))
 
-
     if 'eval_feed' not in cfg:
         eval_feed = create(main_arch + 'EvalFeed')
     else:
@@ -135,39 +137,36 @@ def main():
     _, test_feed_vars = create_feed(eval_feed, iterable=True)
     eval_reader = create_reader(eval_feed, args_path=FLAGS.dataset_dir)
-    #eval_pyreader.decorate_sample_list_generator(eval_reader, place)
     test_data_feed = fluid.DataFeeder(test_feed_vars.values(), place)
 
     assert os.path.exists(FLAGS.model_path)
     infer_prog, feed_names, fetch_targets = fluid.io.load_inference_model(
-        dirname=FLAGS.model_path, executor=exe,
-        model_filename=FLAGS.model_name,
-        params_filename=FLAGS.params_name)
+        dirname=FLAGS.model_path,
+        executor=exe,
+        model_filename=FLAGS.model_name,
+        params_filename=FLAGS.params_name)
 
     eval_keys = ['bbox', 'gt_box', 'gt_label', 'is_difficult']
-    eval_values = ['multiclass_nms_0.tmp_0', 'gt_box', 'gt_label', 'is_difficult']
+    eval_values = [
+        'multiclass_nms_0.tmp_0', 'gt_box', 'gt_label', 'is_difficult'
+    ]
     eval_cls = []
     eval_values[0] = fetch_targets[0]
-    results = eval_run(exe, infer_prog, eval_reader,
-                       eval_keys, eval_values, eval_cls, test_data_feed)
+    results = eval_run(exe, infer_prog, eval_reader, eval_keys, eval_values,
+                       eval_cls, test_data_feed)
     resolution = None
     if 'mask' in results[0]:
         resolution = model.mask_head.resolution
-    eval_results(results, eval_feed, cfg.metric, cfg.num_classes,
-                 resolution, False, FLAGS.output_eval)
+    eval_results(results, eval_feed, cfg.metric, cfg.num_classes, resolution,
+                 False, FLAGS.output_eval)
 
 
 if __name__ == '__main__':
     parser = ArgsParser()
     parser.add_argument(
-        "-m",
-        "--model_path",
-        default=None,
-        type=str,
-        help="path of checkpoint")
+        "-m", "--model_path", default=None, type=str, help="path of checkpoint")
     parser.add_argument(
         "--output_eval",
         default=None,
......
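Note: for readers unfamiliar with the Fluid 1.x inference API used throughout this commit, here is a self-contained round trip: save a tiny model, load it back with fluid.io.load_inference_model, and run it with a {name: ndarray} feed dict. The one-layer network and the tmp_model path are placeholders, not code from this repo:

    import numpy as np
    import paddle.fluid as fluid

    place = fluid.CPUPlace()  # or fluid.CUDAPlace(0)
    exe = fluid.Executor(place)

    # Build and save a tiny model so the load below has something to read.
    x = fluid.data(name='x', shape=[None, 4], dtype='float32')
    y = fluid.layers.fc(input=x, size=2)
    exe.run(fluid.default_startup_program())
    fluid.io.save_inference_model(
        dirname='tmp_model', feeded_var_names=['x'], target_vars=[y],
        executor=exe, model_filename='__model__', params_filename='__params__')

    # Load it back, exactly as the evaluation scripts do.
    infer_prog, feed_names, fetch_targets = fluid.io.load_inference_model(
        dirname='tmp_model',
        executor=exe,
        model_filename='__model__',
        params_filename='__params__')

    # Feed a {var_name: ndarray} dict and fetch the program's outputs.
    outs = exe.run(infer_prog,
                   feed={feed_names[0]: np.random.rand(8, 4).astype('float32')},
                   fetch_list=fetch_targets)
    print(np.array(outs[0]).shape)  # (8, 2)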
@@ -25,6 +25,7 @@ import numpy as np
 from PIL import Image
 sys.path.append("../../")
 
+
 def set_paddle_flags(**kwargs):
     for key, value in kwargs.items():
         if os.environ.get(key, None) is None:
@@ -118,20 +119,19 @@ def main():
     test_images = get_test_images(FLAGS.infer_dir, FLAGS.infer_img)
     test_feed.dataset.add_images(test_images)
 
-
     place = fluid.CUDAPlace(0) if cfg.use_gpu else fluid.CPUPlace()
     exe = fluid.Executor(place)
-
     infer_prog, feed_var_names, fetch_list = fluid.io.load_inference_model(
-        dirname=FLAGS.model_path, model_filename=FLAGS.model_name,
-        params_filename=FLAGS.params_name,
-        executor=exe)
+        dirname=FLAGS.model_path,
+        model_filename=FLAGS.model_name,
+        params_filename=FLAGS.params_name,
+        executor=exe)
 
     reader = create_reader(test_feed)
-    feeder = fluid.DataFeeder(place=place, feed_list=feed_var_names,
-                              program=infer_prog)
+    feeder = fluid.DataFeeder(
+        place=place, feed_list=feed_var_names, program=infer_prog)
 
     # parse infer fetches
     assert cfg.metric in ['COCO', 'VOC'], \
@@ -141,7 +141,9 @@ def main():
         extra_keys = ['im_info', 'im_id', 'im_shape']
     if cfg['metric'] == 'VOC':
         extra_keys = ['im_id', 'im_shape']
-    keys, values, _ = parse_fetches({'bbox':fetch_list}, infer_prog, extra_keys)
+    keys, values, _ = parse_fetches({
+        'bbox': fetch_list
+    }, infer_prog, extra_keys)
 
     # parse dataset category
     if cfg.metric == 'COCO':
@@ -176,7 +178,7 @@ def main():
         if infer_time:
             warmup_times = 10
             repeats_time = 100
-            feed_data_dict = feeder.feed(feed_data);
+            feed_data_dict = feeder.feed(feed_data)
             for i in range(warmup_times):
                 exe.run(compile_prog,
                         feed=feed_data_dict,
@@ -189,7 +191,8 @@ def main():
                         fetch_list=fetch_list,
                         return_numpy=False)
-            print("infer time: {} ms/sample".format((time.time()-start_time) * 1000 / repeats_time))
+            print("infer time: {} ms/sample".format((time.time() - start_time) *
+                                                    1000 / repeats_time))
             infer_time = False
 
         outs = exe.run(compile_prog,
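Note: the hunk above uses the usual warm-up-then-repeat recipe for timing an executor. The same measurement as a standalone helper (a sketch; the program and feed are whatever you benchmark):

    import time

    def benchmark(exe, program, feed_dict, fetch_list,
                  warmup_times=10, repeats_time=100):
        # Warm-up runs exclude one-off costs (allocation, kernel selection).
        for _ in range(warmup_times):
            exe.run(program, feed=feed_dict, fetch_list=fetch_list)
        start_time = time.time()
        for _ in range(repeats_time):
            exe.run(program, feed=feed_dict, fetch_list=fetch_list)
        return (time.time() - start_time) * 1000 / repeats_time  # ms/sample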
@@ -282,10 +285,7 @@ if __name__ == '__main__':
         default="tb_log_dir/image",
         help='Tensorboard logging directory for image.')
     parser.add_argument(
-        '--model_path',
-        type=str,
-        default=None,
-        help="inference model path")
+        '--model_path', type=str, default=None, help="inference model path")
     parser.add_argument(
         '--model_name',
         type=str,
......
@@ -28,11 +28,13 @@ from paddle.fluid.contrib.slim import Compressor
 from paddle.fluid.framework import IrGraph
 from paddle.fluid import core
 
+
 def set_paddle_flags(**kwargs):
     for key, value in kwargs.items():
         if os.environ.get(key, None) is None:
             os.environ[key] = str(value)
 
+
 # NOTE(paddle-dev): All of these flags should be set before
 # `import paddle`. Otherwise, it would not take any effect.
 set_paddle_flags(
@@ -55,6 +57,8 @@ import logging
 FORMAT = '%(asctime)s-%(levelname)s: %(message)s'
 logging.basicConfig(level=logging.INFO, format=FORMAT)
 logger = logging.getLogger(__name__)
+
+
 def eval_run(exe, compile_program, reader, keys, values, cls, test_feed):
     """
     Run evaluation program, return program outputs.
@@ -73,8 +77,7 @@ def eval_run(exe, compile_program, reader, keys, values, cls, test_feed):
     has_bbox = 'bbox' in keys
     for data in reader():
         data = test_feed.feed(data)
-        feed_data = {'image': data['image'],
-                     'im_size': data['im_size']}
+        feed_data = {'image': data['image'], 'im_size': data['im_size']}
         outs = exe.run(compile_program,
                        feed=feed_data,
                        fetch_list=[values[0]],
@@ -155,16 +158,14 @@ def main():
         optimizer = optim_builder(lr)
         optimizer.minimize(loss)
 
-
-    train_reader = create_reader(train_feed, cfg.max_iters,
-                                 FLAGS.dataset_dir)
+    train_reader = create_reader(train_feed, cfg.max_iters, FLAGS.dataset_dir)
     train_loader.set_sample_list_generator(train_reader, place)
 
     # parse train fetches
     train_keys, train_values, _ = parse_fetches(train_fetches)
     train_values.append(lr)
-    train_fetch_list=[]
+    train_fetch_list = []
     for k, v in zip(train_keys, train_values):
         train_fetch_list.append((k, v))
     print("train_fetch_list: {}".format(train_fetch_list))
@@ -188,18 +189,16 @@ def main():
         if cfg.metric == 'VOC':
             extra_keys = ['gt_box', 'gt_label', 'is_difficult']
         eval_keys, eval_values, eval_cls = parse_fetches(fetches, eval_prog,
                                                          extra_keys)
         # print(eval_values)
-        eval_fetch_list=[]
+        eval_fetch_list = []
         for k, v in zip(eval_keys, eval_values):
             eval_fetch_list.append((k, v))
 
     exe.run(startup_prog)
-
     start_iter = 0
     checkpoint.load_params(exe, train_prog, cfg.pretrain_weights)
-
     best_box_ap_list = []
@@ -208,20 +207,20 @@ def main():
         #place = fluid.CPUPlace()
         #exe = fluid.Executor(place)
-        results = eval_run(exe, program, eval_reader,
-                           eval_keys, eval_values, eval_cls, test_data_feed)
+        results = eval_run(exe, program, eval_reader, eval_keys, eval_values,
+                           eval_cls, test_data_feed)
         resolution = None
         if 'mask' in results[0]:
             resolution = model.mask_head.resolution
-        box_ap_stats = eval_results(results, eval_feed, cfg.metric, cfg.num_classes,
-                                    resolution, False, FLAGS.output_eval)
+        box_ap_stats = eval_results(results, eval_feed, cfg.metric,
+                                    cfg.num_classes, resolution, False,
+                                    FLAGS.output_eval)
         if len(best_box_ap_list) == 0:
             best_box_ap_list.append(box_ap_stats[0])
         elif box_ap_stats[0] > best_box_ap_list[0]:
             best_box_ap_list[0] = box_ap_stats[0]
-        logger.info("Best test box ap: {}".format(
-            best_box_ap_list[0]))
+        logger.info("Best test box ap: {}".format(best_box_ap_list[0]))
         return best_box_ap_list[0]
 
     test_feed = [('image', test_feed_vars['image'].name),
@@ -239,13 +238,12 @@ def main():
         eval_feed_list=test_feed,
         eval_func={'map': eval_func},
         eval_fetch_list=[eval_fetch_list[0]],
-        prune_infer_model=[["image", "im_size"],["multiclass_nms_0.tmp_0"]],
+        prune_infer_model=[["image", "im_size"], ["multiclass_nms_0.tmp_0"]],
         train_optimizer=None)
     com.config(FLAGS.slim_file)
     com.run()
 
 
 if __name__ == '__main__':
     parser = ArgsParser()
     parser.add_argument(
......
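Note: the Compressor calls reformatted above follow one pattern: construct it with train/eval programs, readers, and feed/fetch lists, point config() at a YAML strategy file, then run(); the eval_func entry is a callback that returns the metric to track. A shape-only sketch assembled from the kwargs visible in this diff plus the documented constructor arguments; the callback signature (program, scope) and the compress.yaml path are assumptions:

    import paddle.fluid as fluid
    from paddle.fluid.contrib.slim import Compressor

    def eval_func(program, scope):
        # Assumed callback shape: evaluate `program` and return a scalar
        # metric (the script above returns its best box AP).
        ...

    def build_and_run_compressor(place, train_prog, train_reader, train_feed,
                                 train_fetch_list, eval_prog, eval_reader,
                                 test_feed, eval_fetch_list):
        com = Compressor(
            place,
            fluid.global_scope(),
            train_prog,
            train_reader=train_reader,
            train_feed_list=train_feed,        # [(name, var.name), ...] pairs
            train_fetch_list=train_fetch_list,
            eval_program=eval_prog,
            eval_reader=eval_reader,
            eval_feed_list=test_feed,
            eval_func={'map': eval_func},      # metric name -> callback
            eval_fetch_list=[eval_fetch_list[0]],
            prune_infer_model=[["image", "im_size"],
                               ["multiclass_nms_0.tmp_0"]],
            train_optimizer=None)
        com.config('compress.yaml')            # placeholder YAML strategy file
        com.run()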
@@ -32,11 +32,13 @@ from paddle.fluid.contrib.slim.quantization import QuantizationFreezePass
 from paddle.fluid.contrib.slim.quantization import ConvertToInt8Pass
 from paddle.fluid.contrib.slim.quantization import TransformForMobilePass
 
+
 def set_paddle_flags(**kwargs):
     for key, value in kwargs.items():
         if os.environ.get(key, None) is None:
             os.environ[key] = str(value)
 
+
 # NOTE(paddle-dev): All of these flags should be set before
 # `import paddle`. Otherwise, it would not take any effect.
 set_paddle_flags(
@@ -59,6 +61,8 @@ import logging
 FORMAT = '%(asctime)s-%(levelname)s: %(message)s'
 logging.basicConfig(level=logging.INFO, format=FORMAT)
 logger = logging.getLogger(__name__)
+
+
 def eval_run(exe, compile_program, reader, keys, values, cls, test_feed):
     """
     Run evaluation program, return program outputs.
@@ -71,8 +75,7 @@ def eval_run(exe, compile_program, reader, keys, values, cls, test_feed):
     has_bbox = 'bbox' in keys
     for data in reader():
         data = test_feed.feed(data)
-        feed_data = {'image': data['image'],
-                     'im_size': data['im_size']}
+        feed_data = {'image': data['image'], 'im_size': data['im_size']}
         outs = exe.run(compile_program,
                        feed=feed_data,
                        fetch_list=values[0],
@@ -123,7 +126,6 @@ def main():
         devices_num = int(
             os.environ.get('CPU_NUM', multiprocessing.cpu_count()))
 
-
     if 'eval_feed' not in cfg:
         eval_feed = create(main_arch + 'EvalFeed')
     else:
@@ -138,85 +140,78 @@ def main():
     #eval_pyreader.decorate_sample_list_generator(eval_reader, place)
     test_data_feed = fluid.DataFeeder(test_feed_vars.values(), place)
 
     assert os.path.exists(FLAGS.model_path)
     infer_prog, feed_names, fetch_targets = fluid.io.load_inference_model(
-        dirname=FLAGS.model_path, executor=exe,
-        model_filename='__model__.infer',
-        params_filename='__params__')
+        dirname=FLAGS.model_path,
+        executor=exe,
+        model_filename='__model__.infer',
+        params_filename='__params__')
 
     eval_keys = ['bbox', 'gt_box', 'gt_label', 'is_difficult']
-    eval_values = ['multiclass_nms_0.tmp_0', 'gt_box', 'gt_label', 'is_difficult']
+    eval_values = [
+        'multiclass_nms_0.tmp_0', 'gt_box', 'gt_label', 'is_difficult'
+    ]
     eval_cls = []
     eval_values[0] = fetch_targets[0]
-    results = eval_run(exe, infer_prog, eval_reader,
-                       eval_keys, eval_values, eval_cls, test_data_feed)
+    results = eval_run(exe, infer_prog, eval_reader, eval_keys, eval_values,
+                       eval_cls, test_data_feed)
     resolution = None
     if 'mask' in results[0]:
         resolution = model.mask_head.resolution
     box_ap_stats = eval_results(results, eval_feed, cfg.metric, cfg.num_classes,
                                 resolution, False, FLAGS.output_eval)
 
     logger.info("freeze the graph for inference")
     test_graph = IrGraph(core.Graph(infer_prog.desc), for_test=True)
 
     freeze_pass = QuantizationFreezePass(
         scope=fluid.global_scope(),
         place=place,
         weight_quantize_type=FLAGS.weight_quant_type)
     freeze_pass.apply(test_graph)
     server_program = test_graph.to_program()
     fluid.io.save_inference_model(
         dirname=os.path.join(FLAGS.save_path, 'float'),
         feeded_var_names=feed_names,
         target_vars=fetch_targets,
         executor=exe,
         main_program=server_program,
         model_filename='model',
         params_filename='weights')
 
     logger.info("convert the weights into int8 type")
     convert_int8_pass = ConvertToInt8Pass(
-        scope=fluid.global_scope(),
-        place=place)
+        scope=fluid.global_scope(), place=place)
     convert_int8_pass.apply(test_graph)
     server_int8_program = test_graph.to_program()
     fluid.io.save_inference_model(
         dirname=os.path.join(FLAGS.save_path, 'int8'),
         feeded_var_names=feed_names,
         target_vars=fetch_targets,
         executor=exe,
         main_program=server_int8_program,
         model_filename='model',
         params_filename='weights')
 
     logger.info("convert the freezed pass to paddle-lite execution")
     mobile_pass = TransformForMobilePass()
     mobile_pass.apply(test_graph)
     mobile_program = test_graph.to_program()
     fluid.io.save_inference_model(
         dirname=os.path.join(FLAGS.save_path, 'mobile'),
         feeded_var_names=feed_names,
         target_vars=fetch_targets,
         executor=exe,
         main_program=mobile_program,
         model_filename='model',
         params_filename='weights')
 
 
 if __name__ == '__main__':
     parser = ArgsParser()
     parser.add_argument(
-        "-m",
-        "--model_path",
-        default=None,
-        type=str,
-        help="path of checkpoint")
+        "-m", "--model_path", default=None, type=str, help="path of checkpoint")
     parser.add_argument(
         "--output_eval",
         default=None,
......
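Note: the three passes in the hunk above form a fixed export pipeline: QuantizationFreezePass folds the fake-quant ops of a quantization-aware-trained graph into real quantized weights, ConvertToInt8Pass re-stores those weights as int8, and TransformForMobilePass rewrites the graph for Paddle-Lite. A condensed sketch of that sequence, assuming infer_prog came from quantization-aware training as in the script:

    import os
    import paddle.fluid as fluid
    from paddle.fluid import core
    from paddle.fluid.framework import IrGraph
    from paddle.fluid.contrib.slim.quantization import (
        QuantizationFreezePass, ConvertToInt8Pass, TransformForMobilePass)

    def save_graph(graph, dirname, exe, feed_names, fetch_targets):
        # Materialize the current state of the IR graph as an inference model.
        fluid.io.save_inference_model(
            dirname=dirname,
            feeded_var_names=feed_names,
            target_vars=fetch_targets,
            executor=exe,
            main_program=graph.to_program(),
            model_filename='model',
            params_filename='weights')

    def export_quantized(infer_prog, exe, place, feed_names, fetch_targets,
                         save_path, weight_quant_type='abs_max'):
        # Work on an IR-graph view of the fake-quantized inference program.
        graph = IrGraph(core.Graph(infer_prog.desc), for_test=True)

        # 1) Freeze: fold fake-quant ops into real quantized weights.
        QuantizationFreezePass(
            scope=fluid.global_scope(), place=place,
            weight_quantize_type=weight_quant_type).apply(graph)
        save_graph(graph, os.path.join(save_path, 'float'), exe,
                   feed_names, fetch_targets)

        # 2) Convert weight tensors to int8 storage.
        ConvertToInt8Pass(scope=fluid.global_scope(), place=place).apply(graph)
        save_graph(graph, os.path.join(save_path, 'int8'), exe,
                   feed_names, fetch_targets)

        # 3) Rewrite the frozen graph for Paddle-Lite execution.
        TransformForMobilePass().apply(graph)
        save_graph(graph, os.path.join(save_path, 'mobile'), exe,
                   feed_names, fetch_targets)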
@@ -33,22 +33,24 @@ add_arg('model_name', str, "__model__", "model filename for inference model")
 add_arg('params_name', str, "__params__", "params filename for inference model")
 # yapf: enable
 
 
 def eval(args):
     # parameters from arguments
     place = fluid.CUDAPlace(0) if args.use_gpu else fluid.CPUPlace()
     exe = fluid.Executor(place)
-    val_program, feed_target_names, fetch_targets = fluid.io.load_inference_model(args.model_path,
-                                                                                  exe,
-                                                                                  model_filename=args.model_name,
-                                                                                  params_filename=args.params_name)
+    val_program, feed_target_names, fetch_targets = fluid.io.load_inference_model(
+        args.model_path,
+        exe,
+        model_filename=args.model_name,
+        params_filename=args.params_name)
 
     val_reader = paddle.batch(reader.val(), batch_size=128)
-    feeder = fluid.DataFeeder(place=place, feed_list=feed_target_names, program=val_program)
+    feeder = fluid.DataFeeder(
+        place=place, feed_list=feed_target_names, program=val_program)
 
-    results=[]
+    results = []
     for batch_id, data in enumerate(val_reader()):
         # top1_acc, top5_acc
         if len(feed_target_names) == 1:
             # eval "infer model", which input is image, output is classification probability
@@ -56,8 +58,8 @@ def eval(args):
             label = [[d[1]] for d in data]
             feed_data = feeder.feed(image)
             pred = exe.run(val_program,
                            feed=feed_data,
                            fetch_list=fetch_targets)
             pred = np.array(pred[0])
             label = np.array(label)
             sort_array = pred.argsort(axis=1)
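Note: the argsort call above is the core of the script's top-1/top-5 computation; argsort ascending means the last k columns hold the top-k class ids. A self-contained numpy sketch of the same idea (the shapes and data are arbitrary):

    import numpy as np

    def topk_accuracy(pred, label, k):
        # pred: (batch, num_classes) scores; label: (batch, 1) class ids.
        # Ascending argsort, so the last k columns are the top-k class ids.
        topk_ids = pred.argsort(axis=1)[:, -k:]
        return np.mean([label[i, 0] in topk_ids[i] for i in range(len(label))])

    pred = np.random.rand(4, 10)
    label = np.random.randint(0, 10, size=(4, 1))
    print(topk_accuracy(pred, label, 1), topk_accuracy(pred, label, 5))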
@@ -73,18 +75,20 @@ def eval(args):
         else:
             # eval "eval model", which inputs are image and label, output is top1 and top5 accuracy
             result = exe.run(val_program,
                              feed=feeder.feed(data),
                              fetch_list=fetch_targets)
             result = [np.mean(r) for r in result]
             results.append(result)
 
     result = np.mean(np.array(results), axis=0)
     print("top1_acc/top5_acc= {}".format(result))
     sys.stdout.flush()
 
+
 def main():
     args = parser.parse_args()
     print_arguments(args)
     eval(args)
 
+
 if __name__ == '__main__':
     main()
@@ -34,20 +34,24 @@ add_arg('model_name', str, "__model__.infer", "inference model filename")
 add_arg('params_name', str, "__params__", "inference model params filename")
 # yapf: enable
 
+
 def infer(args):
     # parameters from arguments
     place = fluid.CUDAPlace(0) if args.use_gpu else fluid.CPUPlace()
     exe = fluid.Executor(place)
-    test_program, feed_target_names, fetch_targets = fluid.io.load_inference_model(args.model_path,
-                                                                                   exe,
-                                                                                   model_filename=args.model_name,
-                                                                                   params_filename=args.params_name)
+    test_program, feed_target_names, fetch_targets = fluid.io.load_inference_model(
+        args.model_path,
+        exe,
+        model_filename=args.model_name,
+        params_filename=args.params_name)
 
     test_reader = paddle.batch(reader.test(), batch_size=1)
-    feeder = fluid.DataFeeder(place=place, feed_list=feed_target_names, program=test_program)
+    feeder = fluid.DataFeeder(
+        place=place, feed_list=feed_target_names, program=test_program)
 
-    results=[]
+    results = []
+
     #for infer time, if you don't need, please change infer_time to False
     infer_time = True
     compile_prog = fluid.compiler.CompiledProgram(test_program)
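Note: CompiledProgram, created above, wraps a Program for optimized execution and is passed to exe.run in place of the raw program. A minimal runnable sketch (the one-layer network is illustrative):

    import numpy as np
    import paddle.fluid as fluid

    # Build a trivial program: y = fc(x).
    x = fluid.data(name='x', shape=[None, 4], dtype='float32')
    y = fluid.layers.fc(input=x, size=2)

    place = fluid.CPUPlace()
    exe = fluid.Executor(place)
    exe.run(fluid.default_startup_program())

    # Compile once, then run the compiled form like an ordinary program.
    compiled = fluid.compiler.CompiledProgram(fluid.default_main_program())
    out, = exe.run(compiled,
                   feed={'x': np.random.rand(8, 4).astype('float32')},
                   fetch_list=[y])
    print(out.shape)  # (8, 2)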
@@ -58,28 +62,27 @@ def infer(args):
             repeats_time = 100
             feed_data = feeder.feed(data)
             for i in range(warmup_times):
-                exe.run(compile_prog,
-                        feed=feed_data,
-                        fetch_list=fetch_targets)
+                exe.run(compile_prog, feed=feed_data, fetch_list=fetch_targets)
             start_time = time.time()
             for i in range(repeats_time):
-                exe.run(compile_prog,
-                        feed=feed_data,
-                        fetch_list=fetch_targets)
-            print("infer time: {} ms/sample".format((time.time()-start_time) * 1000 / repeats_time))
+                exe.run(compile_prog, feed=feed_data, fetch_list=fetch_targets)
+            print("infer time: {} ms/sample".format((time.time() - start_time) *
+                                                    1000 / repeats_time))
             infer_time = False
 
         # top1_acc, top5_acc
         result = exe.run(compile_prog,
                          feed=feeder.feed(data),
                          fetch_list=fetch_targets)
         result = np.array(result[0])
-        print(result.argsort(axis=1)[:,-1:][::-1])
+        print(result.argsort(axis=1)[:, -1:][::-1])
         sys.stdout.flush()
 
+
 def main():
     args = parser.parse_args()
     print_arguments(args)
     infer(args)
 
+
 if __name__ == '__main__':
     main()
@@ -38,7 +38,8 @@ def compress(args):
     image_shape = "3,224,224"
     image_shape = [int(m) for m in image_shape.split(",")]
 
-    image = fluid.data(name='image', shape=[None] + image_shape, dtype='float32')
+    image = fluid.data(
+        name='image', shape=[None] + image_shape, dtype='float32')
     label = fluid.data(name='label', shape=[None, 1], dtype='int64')
     # model definition
     model = models.__dict__[args.model]()
@@ -99,6 +100,7 @@ def compress(args):
         distiller_optimizer=None)
     com_pass.config(args.config_file)
     com_pass.run()
+
     conv_op_num = 0
     fake_quant_op_num = 0
     for op in com_pass.context.eval_graph.ops():
@@ -110,7 +112,6 @@ def compress(args):
     print('fake quant op num {}'.format(fake_quant_op_num))
 
-
 def main():
     args = parser.parse_args()
     print_arguments(args)
......
@@ -45,27 +45,28 @@ add_arg('save_path', str, './output', 'Path to save inference model')
 add_arg('weight_quant_type', str, 'abs_max', 'quantization type for weight')
 # yapf: enable
 
+
 def eval(args):
     # parameters from arguments
     place = fluid.CUDAPlace(0) if args.use_gpu else fluid.CPUPlace()
     exe = fluid.Executor(place)
-    val_program, feed_names, fetch_targets = fluid.io.load_inference_model(args.model_path,
-                                                                           exe,
-                                                                           model_filename="__model__.infer",
-                                                                           params_filename="__params__")
+    val_program, feed_names, fetch_targets = fluid.io.load_inference_model(
+        args.model_path,
+        exe,
+        model_filename="__model__.infer",
+        params_filename="__params__")
 
     val_reader = paddle.batch(reader.val(), batch_size=128)
-    feeder = fluid.DataFeeder(place=place, feed_list=feed_names, program=val_program)
+    feeder = fluid.DataFeeder(
+        place=place, feed_list=feed_names, program=val_program)
 
-    results=[]
+    results = []
     for batch_id, data in enumerate(val_reader()):
         image = [[d[0]] for d in data]
         label = [[d[1]] for d in data]
         feed_data = feeder.feed(image)
-        pred = exe.run(val_program,
-                       feed=feed_data,
-                       fetch_list=fetch_targets)
+        pred = exe.run(val_program, feed=feed_data, fetch_list=fetch_targets)
         pred = np.array(pred[0])
         label = np.array(label)
         sort_array = pred.argsort(axis=1)
@@ -82,56 +83,58 @@ def eval(args):
     result = np.mean(np.array(results), axis=0)
     print("top1_acc/top5_acc= {}".format(result))
     sys.stdout.flush()
 
+
     _logger.info("freeze the graph for inference")
     test_graph = IrGraph(core.Graph(val_program.desc), for_test=True)
 
     freeze_pass = QuantizationFreezePass(
         scope=fluid.global_scope(),
         place=place,
         weight_quantize_type=args.weight_quant_type)
     freeze_pass.apply(test_graph)
     server_program = test_graph.to_program()
     fluid.io.save_inference_model(
         dirname=os.path.join(args.save_path, 'float'),
         feeded_var_names=feed_names,
         target_vars=fetch_targets,
         executor=exe,
         main_program=server_program,
         model_filename='model',
         params_filename='weights')
 
     _logger.info("convert the weights into int8 type")
     convert_int8_pass = ConvertToInt8Pass(
-        scope=fluid.global_scope(),
-        place=place)
+        scope=fluid.global_scope(), place=place)
     convert_int8_pass.apply(test_graph)
     server_int8_program = test_graph.to_program()
     fluid.io.save_inference_model(
         dirname=os.path.join(args.save_path, 'int8'),
         feeded_var_names=feed_names,
         target_vars=fetch_targets,
         executor=exe,
         main_program=server_int8_program,
         model_filename='model',
         params_filename='weights')
 
     _logger.info("convert the freezed pass to paddle-lite execution")
     mobile_pass = TransformForMobilePass()
     mobile_pass.apply(test_graph)
     mobile_program = test_graph.to_program()
     fluid.io.save_inference_model(
         dirname=os.path.join(args.save_path, 'mobile'),
         feeded_var_names=feed_names,
         target_vars=fetch_targets,
         executor=exe,
         main_program=mobile_program,
         model_filename='model',
         params_filename='weights')
 
+
 def main():
     args = parser.parse_args()
     print_arguments(args)
     eval(args)
 
+
 if __name__ == '__main__':
     main()