提交 c2d68974 编写于 作者: L Liufang Sang 提交者: Bai Yifan

[PaddleSlim] fix details in code format (#3766)

* fix details test=release/1.6

* fix code format  test=release/1.6
上级 a8cf62b1
......@@ -32,11 +32,13 @@ from paddle.fluid.contrib.slim.quantization import QuantizationFreezePass
from paddle.fluid.contrib.slim.quantization import ConvertToInt8Pass
from paddle.fluid.contrib.slim.quantization import TransformForMobilePass
def set_paddle_flags(**kwargs):
    """Export each keyword argument as an environment variable.

    A variable that is already present in ``os.environ`` is left
    untouched, so values set by the user before launch take precedence
    over the defaults passed here. Values are stringified because the
    environment only stores strings.
    """
    for name, default in kwargs.items():
        # Only fill in the variable when it is genuinely absent.
        if os.environ.get(name) is None:
            os.environ[name] = str(default)
# NOTE(paddle-dev): All of these flags should be set before
# `import paddle`. Otherwise, it would not take any effect.
set_paddle_flags(
......@@ -59,6 +61,8 @@ import logging
FORMAT = '%(asctime)s-%(levelname)s: %(message)s'
logging.basicConfig(level=logging.INFO, format=FORMAT)
logger = logging.getLogger(__name__)
def eval_run(exe, compile_program, reader, keys, values, cls, test_feed):
"""
Run evaluation program, return program outputs.
......@@ -71,8 +75,7 @@ def eval_run(exe, compile_program, reader, keys, values, cls, test_feed):
has_bbox = 'bbox' in keys
for data in reader():
data = test_feed.feed(data)
feed_data = {'image': data['image'],
'im_size': data['im_size']}
feed_data = {'image': data['image'], 'im_size': data['im_size']}
outs = exe.run(compile_program,
feed=feed_data,
fetch_list=values[0],
......@@ -123,7 +126,6 @@ def main():
devices_num = int(
os.environ.get('CPU_NUM', multiprocessing.cpu_count()))
if 'eval_feed' not in cfg:
eval_feed = create(main_arch + 'EvalFeed')
else:
......@@ -135,39 +137,36 @@ def main():
_, test_feed_vars = create_feed(eval_feed, iterable=True)
eval_reader = create_reader(eval_feed, args_path=FLAGS.dataset_dir)
#eval_pyreader.decorate_sample_list_generator(eval_reader, place)
test_data_feed = fluid.DataFeeder(test_feed_vars.values(), place)
assert os.path.exists(FLAGS.model_path)
infer_prog, feed_names, fetch_targets = fluid.io.load_inference_model(
dirname=FLAGS.model_path, executor=exe,
dirname=FLAGS.model_path,
executor=exe,
model_filename=FLAGS.model_name,
params_filename=FLAGS.params_name)
eval_keys = ['bbox', 'gt_box', 'gt_label', 'is_difficult']
eval_values = ['multiclass_nms_0.tmp_0', 'gt_box', 'gt_label', 'is_difficult']
eval_values = [
'multiclass_nms_0.tmp_0', 'gt_box', 'gt_label', 'is_difficult'
]
eval_cls = []
eval_values[0] = fetch_targets[0]
results = eval_run(exe, infer_prog, eval_reader,
eval_keys, eval_values, eval_cls, test_data_feed)
results = eval_run(exe, infer_prog, eval_reader, eval_keys, eval_values,
eval_cls, test_data_feed)
resolution = None
if 'mask' in results[0]:
resolution = model.mask_head.resolution
eval_results(results, eval_feed, cfg.metric, cfg.num_classes,
resolution, False, FLAGS.output_eval)
eval_results(results, eval_feed, cfg.metric, cfg.num_classes, resolution,
False, FLAGS.output_eval)
if __name__ == '__main__':
parser = ArgsParser()
parser.add_argument(
"-m",
"--model_path",
default=None,
type=str,
help="path of checkpoint")
"-m", "--model_path", default=None, type=str, help="path of checkpoint")
parser.add_argument(
"--output_eval",
default=None,
......
......@@ -25,6 +25,7 @@ import numpy as np
from PIL import Image
sys.path.append("../../")
def set_paddle_flags(**kwargs):
for key, value in kwargs.items():
if os.environ.get(key, None) is None:
......@@ -119,19 +120,18 @@ def main():
test_images = get_test_images(FLAGS.infer_dir, FLAGS.infer_img)
test_feed.dataset.add_images(test_images)
place = fluid.CUDAPlace(0) if cfg.use_gpu else fluid.CPUPlace()
exe = fluid.Executor(place)
infer_prog, feed_var_names, fetch_list = fluid.io.load_inference_model(
dirname=FLAGS.model_path, model_filename=FLAGS.model_name,
dirname=FLAGS.model_path,
model_filename=FLAGS.model_name,
params_filename=FLAGS.params_name,
executor=exe)
reader = create_reader(test_feed)
feeder = fluid.DataFeeder(place=place, feed_list=feed_var_names,
program=infer_prog)
feeder = fluid.DataFeeder(
place=place, feed_list=feed_var_names, program=infer_prog)
# parse infer fetches
assert cfg.metric in ['COCO', 'VOC'], \
......@@ -141,7 +141,9 @@ def main():
extra_keys = ['im_info', 'im_id', 'im_shape']
if cfg['metric'] == 'VOC':
extra_keys = ['im_id', 'im_shape']
keys, values, _ = parse_fetches({'bbox':fetch_list}, infer_prog, extra_keys)
keys, values, _ = parse_fetches({
'bbox': fetch_list
}, infer_prog, extra_keys)
# parse dataset category
if cfg.metric == 'COCO':
......@@ -176,7 +178,7 @@ def main():
if infer_time:
warmup_times = 10
repeats_time = 100
feed_data_dict = feeder.feed(feed_data);
feed_data_dict = feeder.feed(feed_data)
for i in range(warmup_times):
exe.run(compile_prog,
feed=feed_data_dict,
......@@ -189,7 +191,8 @@ def main():
fetch_list=fetch_list,
return_numpy=False)
print("infer time: {} ms/sample".format((time.time()-start_time) * 1000 / repeats_time))
print("infer time: {} ms/sample".format((time.time() - start_time) *
1000 / repeats_time))
infer_time = False
outs = exe.run(compile_prog,
......@@ -282,10 +285,7 @@ if __name__ == '__main__':
default="tb_log_dir/image",
help='Tensorboard logging directory for image.')
parser.add_argument(
'--model_path',
type=str,
default=None,
help="inference model path")
'--model_path', type=str, default=None, help="inference model path")
parser.add_argument(
'--model_name',
type=str,
......
......@@ -28,11 +28,13 @@ from paddle.fluid.contrib.slim import Compressor
from paddle.fluid.framework import IrGraph
from paddle.fluid import core
def set_paddle_flags(**kwargs):
    """Set each keyword argument as an environment variable unless the
    variable already exists (pre-set values are never overwritten)."""
    for key, value in kwargs.items():
        # Respect a value the user exported before running the script.
        if os.environ.get(key, None) is None:
            os.environ[key] = str(value)
# NOTE(paddle-dev): All of these flags should be set before
# `import paddle`. Otherwise, it would not take any effect.
set_paddle_flags(
......@@ -55,6 +57,8 @@ import logging
FORMAT = '%(asctime)s-%(levelname)s: %(message)s'
logging.basicConfig(level=logging.INFO, format=FORMAT)
logger = logging.getLogger(__name__)
def eval_run(exe, compile_program, reader, keys, values, cls, test_feed):
"""
Run evaluation program, return program outputs.
......@@ -73,8 +77,7 @@ def eval_run(exe, compile_program, reader, keys, values, cls, test_feed):
has_bbox = 'bbox' in keys
for data in reader():
data = test_feed.feed(data)
feed_data = {'image': data['image'],
'im_size': data['im_size']}
feed_data = {'image': data['image'], 'im_size': data['im_size']}
outs = exe.run(compile_program,
feed=feed_data,
fetch_list=[values[0]],
......@@ -155,16 +158,14 @@ def main():
optimizer = optim_builder(lr)
optimizer.minimize(loss)
train_reader = create_reader(train_feed, cfg.max_iters,
FLAGS.dataset_dir)
train_reader = create_reader(train_feed, cfg.max_iters, FLAGS.dataset_dir)
train_loader.set_sample_list_generator(train_reader, place)
# parse train fetches
train_keys, train_values, _ = parse_fetches(train_fetches)
train_values.append(lr)
train_fetch_list=[]
train_fetch_list = []
for k, v in zip(train_keys, train_values):
train_fetch_list.append((k, v))
print("train_fetch_list: {}".format(train_fetch_list))
......@@ -191,15 +192,13 @@ def main():
extra_keys)
# print(eval_values)
eval_fetch_list=[]
eval_fetch_list = []
for k, v in zip(eval_keys, eval_values):
eval_fetch_list.append((k, v))
exe.run(startup_prog)
start_iter = 0
checkpoint.load_params(exe, train_prog, cfg.pretrain_weights)
best_box_ap_list = []
......@@ -208,20 +207,20 @@ def main():
#place = fluid.CPUPlace()
#exe = fluid.Executor(place)
results = eval_run(exe, program, eval_reader,
eval_keys, eval_values, eval_cls, test_data_feed)
results = eval_run(exe, program, eval_reader, eval_keys, eval_values,
eval_cls, test_data_feed)
resolution = None
if 'mask' in results[0]:
resolution = model.mask_head.resolution
box_ap_stats = eval_results(results, eval_feed, cfg.metric, cfg.num_classes,
resolution, False, FLAGS.output_eval)
box_ap_stats = eval_results(results, eval_feed, cfg.metric,
cfg.num_classes, resolution, False,
FLAGS.output_eval)
if len(best_box_ap_list) == 0:
best_box_ap_list.append(box_ap_stats[0])
elif box_ap_stats[0] > best_box_ap_list[0]:
best_box_ap_list[0] = box_ap_stats[0]
logger.info("Best test box ap: {}".format(
best_box_ap_list[0]))
logger.info("Best test box ap: {}".format(best_box_ap_list[0]))
return best_box_ap_list[0]
test_feed = [('image', test_feed_vars['image'].name),
......@@ -239,13 +238,12 @@ def main():
eval_feed_list=test_feed,
eval_func={'map': eval_func},
eval_fetch_list=[eval_fetch_list[0]],
prune_infer_model=[["image", "im_size"],["multiclass_nms_0.tmp_0"]],
prune_infer_model=[["image", "im_size"], ["multiclass_nms_0.tmp_0"]],
train_optimizer=None)
com.config(FLAGS.slim_file)
com.run()
if __name__ == '__main__':
parser = ArgsParser()
parser.add_argument(
......
......@@ -32,11 +32,13 @@ from paddle.fluid.contrib.slim.quantization import QuantizationFreezePass
from paddle.fluid.contrib.slim.quantization import ConvertToInt8Pass
from paddle.fluid.contrib.slim.quantization import TransformForMobilePass
def set_paddle_flags(**kwargs):
    """Publish default flag values into the process environment.

    Each keyword becomes an environment variable; existing variables
    win, so user-supplied settings are never clobbered.
    """
    for key, value in kwargs.items():
        # setdefault only writes when the key is absent, which is
        # exactly the "do not overwrite" behavior required here.
        os.environ.setdefault(key, str(value))
# NOTE(paddle-dev): All of these flags should be set before
# `import paddle`. Otherwise, it would not take any effect.
set_paddle_flags(
......@@ -59,6 +61,8 @@ import logging
FORMAT = '%(asctime)s-%(levelname)s: %(message)s'
logging.basicConfig(level=logging.INFO, format=FORMAT)
logger = logging.getLogger(__name__)
def eval_run(exe, compile_program, reader, keys, values, cls, test_feed):
"""
Run evaluation program, return program outputs.
......@@ -71,8 +75,7 @@ def eval_run(exe, compile_program, reader, keys, values, cls, test_feed):
has_bbox = 'bbox' in keys
for data in reader():
data = test_feed.feed(data)
feed_data = {'image': data['image'],
'im_size': data['im_size']}
feed_data = {'image': data['image'], 'im_size': data['im_size']}
outs = exe.run(compile_program,
feed=feed_data,
fetch_list=values[0],
......@@ -123,7 +126,6 @@ def main():
devices_num = int(
os.environ.get('CPU_NUM', multiprocessing.cpu_count()))
if 'eval_feed' not in cfg:
eval_feed = create(main_arch + 'EvalFeed')
else:
......@@ -138,21 +140,22 @@ def main():
#eval_pyreader.decorate_sample_list_generator(eval_reader, place)
test_data_feed = fluid.DataFeeder(test_feed_vars.values(), place)
assert os.path.exists(FLAGS.model_path)
infer_prog, feed_names, fetch_targets = fluid.io.load_inference_model(
dirname=FLAGS.model_path, executor=exe,
dirname=FLAGS.model_path,
executor=exe,
model_filename='__model__.infer',
params_filename='__params__')
eval_keys = ['bbox', 'gt_box', 'gt_label', 'is_difficult']
eval_values = ['multiclass_nms_0.tmp_0', 'gt_box', 'gt_label', 'is_difficult']
eval_values = [
'multiclass_nms_0.tmp_0', 'gt_box', 'gt_label', 'is_difficult'
]
eval_cls = []
eval_values[0] = fetch_targets[0]
results = eval_run(exe, infer_prog, eval_reader,
eval_keys, eval_values, eval_cls, test_data_feed)
results = eval_run(exe, infer_prog, eval_reader, eval_keys, eval_values,
eval_cls, test_data_feed)
resolution = None
if 'mask' in results[0]:
resolution = model.mask_head.resolution
......@@ -179,8 +182,7 @@ def main():
logger.info("convert the weights into int8 type")
convert_int8_pass = ConvertToInt8Pass(
scope=fluid.global_scope(),
place=place)
scope=fluid.global_scope(), place=place)
convert_int8_pass.apply(test_graph)
server_int8_program = test_graph.to_program()
fluid.io.save_inference_model(
......@@ -206,17 +208,10 @@ def main():
params_filename='weights')
if __name__ == '__main__':
parser = ArgsParser()
parser.add_argument(
"-m",
"--model_path",
default=None,
type=str,
help="path of checkpoint")
"-m", "--model_path", default=None, type=str, help="path of checkpoint")
parser.add_argument(
"--output_eval",
default=None,
......
......@@ -33,22 +33,24 @@ add_arg('model_name', str, "__model__", "model filename for inference model")
add_arg('params_name', str, "__params__", "params filename for inference model")
# yapf: enable
def eval(args):
# parameters from arguments
place = fluid.CUDAPlace(0) if args.use_gpu else fluid.CPUPlace()
exe = fluid.Executor(place)
val_program, feed_target_names, fetch_targets = fluid.io.load_inference_model(args.model_path,
val_program, feed_target_names, fetch_targets = fluid.io.load_inference_model(
args.model_path,
exe,
model_filename=args.model_name,
params_filename=args.params_name)
val_reader = paddle.batch(reader.val(), batch_size=128)
feeder = fluid.DataFeeder(place=place, feed_list=feed_target_names, program=val_program)
feeder = fluid.DataFeeder(
place=place, feed_list=feed_target_names, program=val_program)
results=[]
results = []
for batch_id, data in enumerate(val_reader()):
# top1_acc, top5_acc
if len(feed_target_names) == 1:
# eval "infer model", which input is image, output is classification probability
......@@ -81,10 +83,12 @@ def eval(args):
print("top1_acc/top5_acc= {}".format(result))
sys.stdout.flush()
def main():
    """CLI entry point: parse command-line args, echo them, run evaluation."""
    # `parser` and `print_arguments` are module-level names defined
    # outside this visible span — presumably the argparse parser built
    # near the top of the file; verify against the full module.
    args = parser.parse_args()
    print_arguments(args)
    eval(args)
if __name__ == '__main__':
main()
......@@ -34,20 +34,24 @@ add_arg('model_name', str, "__model__.infer", "inference model filename")
add_arg('params_name', str, "__params__", "inference model params filename")
# yapf: enable
def infer(args):
# parameters from arguments
place = fluid.CUDAPlace(0) if args.use_gpu else fluid.CPUPlace()
exe = fluid.Executor(place)
test_program, feed_target_names, fetch_targets = fluid.io.load_inference_model(args.model_path,
test_program, feed_target_names, fetch_targets = fluid.io.load_inference_model(
args.model_path,
exe,
model_filename=args.model_name,
params_filename=args.params_name)
test_reader = paddle.batch(reader.test(), batch_size=1)
feeder = fluid.DataFeeder(place=place, feed_list=feed_target_names, program=test_program)
feeder = fluid.DataFeeder(
place=place, feed_list=feed_target_names, program=test_program)
results = []
results=[]
#for infer time, if you don't need, please change infer_time to False
infer_time = True
compile_prog = fluid.compiler.CompiledProgram(test_program)
......@@ -58,28 +62,27 @@ def infer(args):
repeats_time = 100
feed_data = feeder.feed(data)
for i in range(warmup_times):
exe.run(compile_prog,
feed=feed_data,
fetch_list=fetch_targets)
exe.run(compile_prog, feed=feed_data, fetch_list=fetch_targets)
start_time = time.time()
for i in range(repeats_time):
exe.run(compile_prog,
feed=feed_data,
fetch_list=fetch_targets)
print("infer time: {} ms/sample".format((time.time()-start_time) * 1000 / repeats_time))
exe.run(compile_prog, feed=feed_data, fetch_list=fetch_targets)
print("infer time: {} ms/sample".format((time.time() - start_time) *
1000 / repeats_time))
infer_time = False
# top1_acc, top5_acc
result = exe.run(compile_prog,
feed=feeder.feed(data),
fetch_list=fetch_targets)
result = np.array(result[0])
print(result.argsort(axis=1)[:,-1:][::-1])
print(result.argsort(axis=1)[:, -1:][::-1])
sys.stdout.flush()
def main():
    """CLI entry point: parse command-line args, echo them, run inference."""
    # `parser` and `print_arguments` come from module scope (not shown
    # in this span) — TODO confirm against the full file.
    args = parser.parse_args()
    print_arguments(args)
    infer(args)
if __name__ == '__main__':
main()
......@@ -38,7 +38,8 @@ def compress(args):
image_shape = "3,224,224"
image_shape = [int(m) for m in image_shape.split(",")]
image = fluid.data(name='image', shape=[None] + image_shape, dtype='float32')
image = fluid.data(
name='image', shape=[None] + image_shape, dtype='float32')
label = fluid.data(name='label', shape=[None, 1], dtype='int64')
# model definition
model = models.__dict__[args.model]()
......@@ -99,6 +100,7 @@ def compress(args):
distiller_optimizer=None)
com_pass.config(args.config_file)
com_pass.run()
conv_op_num = 0
fake_quant_op_num = 0
for op in com_pass.context.eval_graph.ops():
......@@ -110,7 +112,6 @@ def compress(args):
print('fake quant op num {}'.format(fake_quant_op_num))
def main():
args = parser.parse_args()
print_arguments(args)
......
......@@ -45,27 +45,28 @@ add_arg('save_path', str, './output', 'Path to save inference model')
add_arg('weight_quant_type', str, 'abs_max', 'quantization type for weight')
# yapf: enable
def eval(args):
# parameters from arguments
place = fluid.CUDAPlace(0) if args.use_gpu else fluid.CPUPlace()
exe = fluid.Executor(place)
val_program, feed_names, fetch_targets = fluid.io.load_inference_model(args.model_path,
val_program, feed_names, fetch_targets = fluid.io.load_inference_model(
args.model_path,
exe,
model_filename="__model__.infer",
params_filename="__params__")
val_reader = paddle.batch(reader.val(), batch_size=128)
feeder = fluid.DataFeeder(place=place, feed_list=feed_names, program=val_program)
feeder = fluid.DataFeeder(
place=place, feed_list=feed_names, program=val_program)
results=[]
results = []
for batch_id, data in enumerate(val_reader()):
image = [[d[0]] for d in data]
label = [[d[1]] for d in data]
feed_data = feeder.feed(image)
pred = exe.run(val_program,
feed=feed_data,
fetch_list=fetch_targets)
pred = exe.run(val_program, feed=feed_data, fetch_list=fetch_targets)
pred = np.array(pred[0])
label = np.array(label)
sort_array = pred.argsort(axis=1)
......@@ -82,6 +83,7 @@ def eval(args):
result = np.mean(np.array(results), axis=0)
print("top1_acc/top5_acc= {}".format(result))
sys.stdout.flush()
_logger.info("freeze the graph for inference")
test_graph = IrGraph(core.Graph(val_program.desc), for_test=True)
......@@ -102,8 +104,7 @@ def eval(args):
_logger.info("convert the weights into int8 type")
convert_int8_pass = ConvertToInt8Pass(
scope=fluid.global_scope(),
place=place)
scope=fluid.global_scope(), place=place)
convert_int8_pass.apply(test_graph)
server_int8_program = test_graph.to_program()
fluid.io.save_inference_model(
......@@ -128,10 +129,12 @@ def eval(args):
model_filename='model',
params_filename='weights')
def main():
    """CLI entry point: parse command-line args, echo them, run evaluation."""
    # Relies on the module-level `parser` and `print_arguments` helpers
    # defined earlier in this file (outside the visible span).
    args = parser.parse_args()
    print_arguments(args)
    eval(args)
if __name__ == '__main__':
main()
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册