From c2d689742c44462edc6c03bcef75513fd2abc94f Mon Sep 17 00:00:00 2001
From: Liufang Sang
Date: Fri, 25 Oct 2019 14:58:24 +0800
Subject: [PATCH] [PaddleSlim] fix details in code format (#3766)

* fix details
test=release/1.6

* fix code format
test=release/1.6
---
 PaddleCV/PaddleDetection/slim/eval.py         | 35 ++++----
 PaddleCV/PaddleDetection/slim/infer.py        | 28 +++---
 .../slim/quantization/compress.py             | 34 ++++----
 .../slim/quantization/freeze.py               | 87 +++++++++----------
 PaddleSlim/classification/eval.py             | 26 +++---
 PaddleSlim/classification/infer.py            | 35 ++++----
 .../classification/quantization/compress.py   |  5 +-
 .../classification/quantization/freeze.py     | 73 ++++++++--------
 8 files changed, 163 insertions(+), 160 deletions(-)

diff --git a/PaddleCV/PaddleDetection/slim/eval.py b/PaddleCV/PaddleDetection/slim/eval.py
index 333ed721..a0020314 100644
--- a/PaddleCV/PaddleDetection/slim/eval.py
+++ b/PaddleCV/PaddleDetection/slim/eval.py
@@ -32,11 +32,13 @@ from paddle.fluid.contrib.slim.quantization import QuantizationFreezePass
 from paddle.fluid.contrib.slim.quantization import ConvertToInt8Pass
 from paddle.fluid.contrib.slim.quantization import TransformForMobilePass
 
+
 def set_paddle_flags(**kwargs):
     for key, value in kwargs.items():
         if os.environ.get(key, None) is None:
             os.environ[key] = str(value)
 
+
 # NOTE(paddle-dev): All of these flags should be set before
 # `import paddle`. Otherwise, it would not take any effect.
 set_paddle_flags(
@@ -59,6 +61,8 @@ import logging
 FORMAT = '%(asctime)s-%(levelname)s: %(message)s'
 logging.basicConfig(level=logging.INFO, format=FORMAT)
 logger = logging.getLogger(__name__)
+
+
 def eval_run(exe, compile_program, reader, keys, values, cls, test_feed):
     """
     Run evaluation program, return program outputs.
@@ -71,8 +75,7 @@ def eval_run(exe, compile_program, reader, keys, values, cls, test_feed):
     has_bbox = 'bbox' in keys
     for data in reader():
         data = test_feed.feed(data)
-        feed_data = {'image': data['image'],
-                     'im_size': data['im_size']}
+        feed_data = {'image': data['image'], 'im_size': data['im_size']}
         outs = exe.run(compile_program,
                        feed=feed_data,
                        fetch_list=values[0],
@@ -123,7 +126,6 @@ def main():
         devices_num = int(
             os.environ.get('CPU_NUM', multiprocessing.cpu_count()))
 
-
     if 'eval_feed' not in cfg:
         eval_feed = create(main_arch + 'EvalFeed')
     else:
@@ -135,39 +137,36 @@ def main():
     _, test_feed_vars = create_feed(eval_feed, iterable=True)
 
     eval_reader = create_reader(eval_feed, args_path=FLAGS.dataset_dir)
-    #eval_pyreader.decorate_sample_list_generator(eval_reader, place)
     test_data_feed = fluid.DataFeeder(test_feed_vars.values(), place)
 
-
     assert os.path.exists(FLAGS.model_path)
     infer_prog, feed_names, fetch_targets = fluid.io.load_inference_model(
-        dirname=FLAGS.model_path, executor=exe,
-        model_filename=FLAGS.model_name,
-        params_filename=FLAGS.params_name)
+        dirname=FLAGS.model_path,
+        executor=exe,
+        model_filename=FLAGS.model_name,
+        params_filename=FLAGS.params_name)
 
     eval_keys = ['bbox', 'gt_box', 'gt_label', 'is_difficult']
-    eval_values = ['multiclass_nms_0.tmp_0', 'gt_box', 'gt_label', 'is_difficult']
+    eval_values = [
+        'multiclass_nms_0.tmp_0', 'gt_box', 'gt_label', 'is_difficult'
+    ]
     eval_cls = []
     eval_values[0] = fetch_targets[0]
 
-    results = eval_run(exe, infer_prog, eval_reader,
-                       eval_keys, eval_values, eval_cls, test_data_feed)
+    results = eval_run(exe, infer_prog, eval_reader, eval_keys, eval_values,
+                       eval_cls, test_data_feed)
 
     resolution = None
     if 'mask' in results[0]:
         resolution = model.mask_head.resolution
-    eval_results(results, eval_feed, cfg.metric, cfg.num_classes,
-                 resolution, False, FLAGS.output_eval)
+    eval_results(results, eval_feed, cfg.metric, cfg.num_classes, resolution,
+                 False, FLAGS.output_eval)
 
 
 if __name__ == '__main__':
     parser = ArgsParser()
     parser.add_argument(
-        "-m",
-        "--model_path",
-        default=None,
-        type=str,
-        help="path of checkpoint")
+        "-m", "--model_path", default=None, type=str, help="path of checkpoint")
     parser.add_argument(
         "--output_eval",
         default=None,
diff --git a/PaddleCV/PaddleDetection/slim/infer.py b/PaddleCV/PaddleDetection/slim/infer.py
index 438eb925..a5c00de9 100644
--- a/PaddleCV/PaddleDetection/slim/infer.py
+++ b/PaddleCV/PaddleDetection/slim/infer.py
@@ -25,6 +25,7 @@ import numpy as np
 from PIL import Image
 sys.path.append("../../")
 
+
 def set_paddle_flags(**kwargs):
     for key, value in kwargs.items():
         if os.environ.get(key, None) is None:
@@ -118,20 +119,19 @@ def main():
 
     test_images = get_test_images(FLAGS.infer_dir, FLAGS.infer_img)
     test_feed.dataset.add_images(test_images)
 
-
     place = fluid.CUDAPlace(0) if cfg.use_gpu else fluid.CPUPlace()
     exe = fluid.Executor(place)
 
-
     infer_prog, feed_var_names, fetch_list = fluid.io.load_inference_model(
-        dirname=FLAGS.model_path, model_filename=FLAGS.model_name,
-        params_filename=FLAGS.params_name,
-        executor=exe)
+        dirname=FLAGS.model_path,
+        model_filename=FLAGS.model_name,
+        params_filename=FLAGS.params_name,
+        executor=exe)
 
     reader = create_reader(test_feed)
-    feeder = fluid.DataFeeder(place=place, feed_list=feed_var_names,
-                              program=infer_prog)
+    feeder = fluid.DataFeeder(
+        place=place, feed_list=feed_var_names, program=infer_prog)
 
     # parse infer fetches
     assert cfg.metric in ['COCO', 'VOC'], \
@@ -141,7 +141,9 @@ def main():
         extra_keys = ['im_info', 'im_id', 'im_shape']
     if cfg['metric'] == 'VOC':
         extra_keys = ['im_id', 'im_shape']
-    keys, values, _ = parse_fetches({'bbox':fetch_list}, infer_prog, extra_keys)
+    keys, values, _ = parse_fetches({
+        'bbox': fetch_list
+    }, infer_prog, extra_keys)
 
     # parse dataset category
     if cfg.metric == 'COCO':
@@ -176,7 +178,7 @@ def main():
         if infer_time:
             warmup_times = 10
             repeats_time = 100
-            feed_data_dict = feeder.feed(feed_data);
+            feed_data_dict = feeder.feed(feed_data)
             for i in range(warmup_times):
                 exe.run(compile_prog,
                         feed=feed_data_dict,
@@ -189,7 +191,8 @@ def main():
                         fetch_list=fetch_list,
                         return_numpy=False)
 
-            print("infer time: {} ms/sample".format((time.time()-start_time) * 1000 / repeats_time))
+            print("infer time: {} ms/sample".format((time.time() - start_time) *
+                                                    1000 / repeats_time))
             infer_time = False
 
         outs = exe.run(compile_prog,
@@ -282,10 +285,7 @@ if __name__ == '__main__':
         default="tb_log_dir/image",
         help='Tensorboard logging directory for image.')
     parser.add_argument(
-        '--model_path',
-        type=str,
-        default=None,
-        help="inference model path")
+        '--model_path', type=str, default=None, help="inference model path")
     parser.add_argument(
         '--model_name',
         type=str,
diff --git a/PaddleCV/PaddleDetection/slim/quantization/compress.py b/PaddleCV/PaddleDetection/slim/quantization/compress.py
index 6e3156ac..e9492168 100644
--- a/PaddleCV/PaddleDetection/slim/quantization/compress.py
+++ b/PaddleCV/PaddleDetection/slim/quantization/compress.py
@@ -28,11 +28,13 @@ from paddle.fluid.contrib.slim import Compressor
 from paddle.fluid.framework import IrGraph
 from paddle.fluid import core
 
+
 def set_paddle_flags(**kwargs):
     for key, value in kwargs.items():
         if os.environ.get(key, None) is None:
             os.environ[key] = str(value)
 
+
 # NOTE(paddle-dev): All of these flags should be set before
 # `import paddle`. Otherwise, it would not take any effect.
 set_paddle_flags(
@@ -55,6 +57,8 @@ import logging
 FORMAT = '%(asctime)s-%(levelname)s: %(message)s'
 logging.basicConfig(level=logging.INFO, format=FORMAT)
 logger = logging.getLogger(__name__)
+
+
 def eval_run(exe, compile_program, reader, keys, values, cls, test_feed):
     """
     Run evaluation program, return program outputs.
@@ -73,8 +77,7 @@ def eval_run(exe, compile_program, reader, keys, values, cls, test_feed):
     has_bbox = 'bbox' in keys
     for data in reader():
         data = test_feed.feed(data)
-        feed_data = {'image': data['image'],
-                     'im_size': data['im_size']}
+        feed_data = {'image': data['image'], 'im_size': data['im_size']}
         outs = exe.run(compile_program,
                        feed=feed_data,
                        fetch_list=[values[0]],
@@ -155,16 +158,14 @@ def main():
         optimizer = optim_builder(lr)
         optimizer.minimize(loss)
 
-
-    train_reader = create_reader(train_feed, cfg.max_iters,
-                                 FLAGS.dataset_dir)
+    train_reader = create_reader(train_feed, cfg.max_iters, FLAGS.dataset_dir)
     train_loader.set_sample_list_generator(train_reader, place)
 
     # parse train fetches
     train_keys, train_values, _ = parse_fetches(train_fetches)
     train_values.append(lr)
 
-    train_fetch_list=[]
+    train_fetch_list = []
     for k, v in zip(train_keys, train_values):
         train_fetch_list.append((k, v))
     print("train_fetch_list: {}".format(train_fetch_list))
@@ -188,18 +189,16 @@ def main():
     if cfg.metric == 'VOC':
         extra_keys = ['gt_box', 'gt_label', 'is_difficult']
     eval_keys, eval_values, eval_cls = parse_fetches(fetches, eval_prog,
-                                                      extra_keys)
+                                                     extra_keys)
     # print(eval_values)
 
-    eval_fetch_list=[]
+    eval_fetch_list = []
     for k, v in zip(eval_keys, eval_values):
         eval_fetch_list.append((k, v))
 
-
     exe.run(startup_prog)
 
     start_iter = 0
-
     checkpoint.load_params(exe, train_prog, cfg.pretrain_weights)
 
     best_box_ap_list = []
@@ -208,20 +207,20 @@ def main():
         #place = fluid.CPUPlace()
         #exe = fluid.Executor(place)
 
-        results = eval_run(exe, program, eval_reader,
-                           eval_keys, eval_values, eval_cls, test_data_feed)
+        results = eval_run(exe, program, eval_reader, eval_keys, eval_values,
+                           eval_cls, test_data_feed)
 
         resolution = None
         if 'mask' in results[0]:
             resolution = model.mask_head.resolution
-        box_ap_stats = eval_results(results, eval_feed, cfg.metric, cfg.num_classes,
-                                    resolution, False, FLAGS.output_eval)
+        box_ap_stats = eval_results(results, eval_feed, cfg.metric,
+                                    cfg.num_classes, resolution, False,
+                                    FLAGS.output_eval)
         if len(best_box_ap_list) == 0:
             best_box_ap_list.append(box_ap_stats[0])
         elif box_ap_stats[0] > best_box_ap_list[0]:
             best_box_ap_list[0] = box_ap_stats[0]
-        logger.info("Best test box ap: {}".format(
-            best_box_ap_list[0]))
+        logger.info("Best test box ap: {}".format(best_box_ap_list[0]))
         return best_box_ap_list[0]
 
     test_feed = [('image', test_feed_vars['image'].name),
@@ -239,13 +238,12 @@ def main():
         eval_feed_list=test_feed,
         eval_func={'map': eval_func},
         eval_fetch_list=[eval_fetch_list[0]],
-        prune_infer_model=[["image", "im_size"],["multiclass_nms_0.tmp_0"]],
+        prune_infer_model=[["image", "im_size"], ["multiclass_nms_0.tmp_0"]],
         train_optimizer=None)
     com.config(FLAGS.slim_file)
     com.run()
 
-
 if __name__ == '__main__':
     parser = ArgsParser()
     parser.add_argument(
diff --git a/PaddleCV/PaddleDetection/slim/quantization/freeze.py b/PaddleCV/PaddleDetection/slim/quantization/freeze.py
index 85906fa8..bdf330c1 100644
--- a/PaddleCV/PaddleDetection/slim/quantization/freeze.py
+++ b/PaddleCV/PaddleDetection/slim/quantization/freeze.py
@@ -32,11 +32,13 @@ from paddle.fluid.contrib.slim.quantization import QuantizationFreezePass
 from paddle.fluid.contrib.slim.quantization import ConvertToInt8Pass
 from paddle.fluid.contrib.slim.quantization import TransformForMobilePass
 
+
 def set_paddle_flags(**kwargs):
     for key, value in kwargs.items():
         if os.environ.get(key, None) is None:
             os.environ[key] = str(value)
 
+
 # NOTE(paddle-dev): All of these flags should be set before
 # `import paddle`. Otherwise, it would not take any effect.
 set_paddle_flags(
@@ -59,6 +61,8 @@ import logging
 FORMAT = '%(asctime)s-%(levelname)s: %(message)s'
 logging.basicConfig(level=logging.INFO, format=FORMAT)
 logger = logging.getLogger(__name__)
+
+
 def eval_run(exe, compile_program, reader, keys, values, cls, test_feed):
     """
     Run evaluation program, return program outputs.
@@ -71,8 +75,7 @@ def eval_run(exe, compile_program, reader, keys, values, cls, test_feed):
     has_bbox = 'bbox' in keys
     for data in reader():
         data = test_feed.feed(data)
-        feed_data = {'image': data['image'],
-                     'im_size': data['im_size']}
+        feed_data = {'image': data['image'], 'im_size': data['im_size']}
         outs = exe.run(compile_program,
                        feed=feed_data,
                        fetch_list=values[0],
@@ -123,7 +126,6 @@ def main():
         devices_num = int(
             os.environ.get('CPU_NUM', multiprocessing.cpu_count()))
 
-
     if 'eval_feed' not in cfg:
         eval_feed = create(main_arch + 'EvalFeed')
     else:
@@ -138,85 +140,78 @@ def main():
     #eval_pyreader.decorate_sample_list_generator(eval_reader, place)
     test_data_feed = fluid.DataFeeder(test_feed_vars.values(), place)
 
-
     assert os.path.exists(FLAGS.model_path)
     infer_prog, feed_names, fetch_targets = fluid.io.load_inference_model(
-        dirname=FLAGS.model_path, executor=exe,
-        model_filename='__model__.infer',
-        params_filename='__params__')
+        dirname=FLAGS.model_path,
+        executor=exe,
+        model_filename='__model__.infer',
+        params_filename='__params__')
 
     eval_keys = ['bbox', 'gt_box', 'gt_label', 'is_difficult']
-    eval_values = ['multiclass_nms_0.tmp_0', 'gt_box', 'gt_label', 'is_difficult']
+    eval_values = [
+        'multiclass_nms_0.tmp_0', 'gt_box', 'gt_label', 'is_difficult'
+    ]
     eval_cls = []
     eval_values[0] = fetch_targets[0]
 
-    results = eval_run(exe, infer_prog, eval_reader,
-                       eval_keys, eval_values, eval_cls, test_data_feed)
-
+    results = eval_run(exe, infer_prog, eval_reader, eval_keys, eval_values,
+                       eval_cls, test_data_feed)
     resolution = None
     if 'mask' in results[0]:
         resolution = model.mask_head.resolution
     box_ap_stats = eval_results(results, eval_feed, cfg.metric, cfg.num_classes,
-                               resolution, False, FLAGS.output_eval)
+                                resolution, False, FLAGS.output_eval)
 
     logger.info("freeze the graph for inference")
     test_graph = IrGraph(core.Graph(infer_prog.desc), for_test=True)
 
     freeze_pass = QuantizationFreezePass(
-            scope=fluid.global_scope(),
-            place=place,
-            weight_quantize_type=FLAGS.weight_quant_type)
+        scope=fluid.global_scope(),
+        place=place,
+        weight_quantize_type=FLAGS.weight_quant_type)
     freeze_pass.apply(test_graph)
     server_program = test_graph.to_program()
     fluid.io.save_inference_model(
-            dirname=os.path.join(FLAGS.save_path, 'float'),
-            feeded_var_names=feed_names,
-            target_vars=fetch_targets,
-            executor=exe,
-            main_program=server_program,
-            model_filename='model',
-            params_filename='weights')
+        dirname=os.path.join(FLAGS.save_path, 'float'),
+        feeded_var_names=feed_names,
+        target_vars=fetch_targets,
+        executor=exe,
+        main_program=server_program,
+        model_filename='model',
+        params_filename='weights')
 
     logger.info("convert the weights into int8 type")
     convert_int8_pass = ConvertToInt8Pass(
-            scope=fluid.global_scope(),
-            place=place)
+        scope=fluid.global_scope(), place=place)
     convert_int8_pass.apply(test_graph)
     server_int8_program = test_graph.to_program()
     fluid.io.save_inference_model(
-            dirname=os.path.join(FLAGS.save_path, 'int8'),
-            feeded_var_names=feed_names,
-            target_vars=fetch_targets,
-            executor=exe,
-            main_program=server_int8_program,
-            model_filename='model',
-            params_filename='weights')
+        dirname=os.path.join(FLAGS.save_path, 'int8'),
+        feeded_var_names=feed_names,
+        target_vars=fetch_targets,
+        executor=exe,
+        main_program=server_int8_program,
+        model_filename='model',
+        params_filename='weights')
 
     logger.info("convert the freezed pass to paddle-lite execution")
     mobile_pass = TransformForMobilePass()
     mobile_pass.apply(test_graph)
     mobile_program = test_graph.to_program()
     fluid.io.save_inference_model(
-            dirname=os.path.join(FLAGS.save_path, 'mobile'),
-            feeded_var_names=feed_names,
-            target_vars=fetch_targets,
-            executor=exe,
-            main_program=mobile_program,
-            model_filename='model',
-            params_filename='weights')
-
-
-
+        dirname=os.path.join(FLAGS.save_path, 'mobile'),
+        feeded_var_names=feed_names,
+        target_vars=fetch_targets,
+        executor=exe,
+        main_program=mobile_program,
+        model_filename='model',
+        params_filename='weights')
 
 
 if __name__ == '__main__':
     parser = ArgsParser()
     parser.add_argument(
-        "-m",
-        "--model_path",
-        default=None,
-        type=str,
-        help="path of checkpoint")
+        "-m", "--model_path", default=None, type=str, help="path of checkpoint")
     parser.add_argument(
         "--output_eval",
         default=None,
diff --git a/PaddleSlim/classification/eval.py b/PaddleSlim/classification/eval.py
index f091a9e9..644c99c4 100644
--- a/PaddleSlim/classification/eval.py
+++ b/PaddleSlim/classification/eval.py
@@ -33,22 +33,24 @@ add_arg('model_name', str, "__model__", "model filename for inference model")
 add_arg('params_name', str, "__params__", "params filename for inference model")
 # yapf: enable
 
+
 def eval(args):
     # parameters from arguments
     place = fluid.CUDAPlace(0) if args.use_gpu else fluid.CPUPlace()
     exe = fluid.Executor(place)
-    val_program, feed_target_names, fetch_targets = fluid.io.load_inference_model(args.model_path,
-                                                 exe,
-                                                 model_filename=args.model_name,
-                                                 params_filename=args.params_name)
+    val_program, feed_target_names, fetch_targets = fluid.io.load_inference_model(
+        args.model_path,
+        exe,
+        model_filename=args.model_name,
+        params_filename=args.params_name)
     val_reader = paddle.batch(reader.val(), batch_size=128)
-    feeder = fluid.DataFeeder(place=place, feed_list=feed_target_names, program=val_program)
+    feeder = fluid.DataFeeder(
+        place=place, feed_list=feed_target_names, program=val_program)
 
-    results=[]
+    results = []
     for batch_id, data in enumerate(val_reader()):
-
         # top1_acc, top5_acc
         if len(feed_target_names) == 1:
             # eval "infer model", which input is image, output is classification probability
             image = [[d[0]] for d in data]
             label = [[d[1]] for d in data]
             feed_data = feeder.feed(image)
             pred = exe.run(val_program,
-                          feed=feed_data,
-                          fetch_list=fetch_targets)
+                           feed=feed_data,
+                           fetch_list=fetch_targets)
             pred = np.array(pred[0])
             label = np.array(label)
             sort_array = pred.argsort(axis=1)
@@ -73,18 +75,20 @@ def eval(args):
         else:
             # eval "eval model", which inputs are image and label, output is top1 and top5 accuracy
             result = exe.run(val_program,
-                            feed=feeder.feed(data),
-                            fetch_list=fetch_targets)
+                             feed=feeder.feed(data),
+                             fetch_list=fetch_targets)
             result = [np.mean(r) for r in result]
             results.append(result)
     result = np.mean(np.array(results), axis=0)
     print("top1_acc/top5_acc= {}".format(result))
     sys.stdout.flush()
 
+
 def main():
     args = parser.parse_args()
     print_arguments(args)
     eval(args)
 
+
 if __name__ == '__main__':
     main()
diff --git a/PaddleSlim/classification/infer.py b/PaddleSlim/classification/infer.py
index 7cc7f967..9fa4178e 100644
--- a/PaddleSlim/classification/infer.py
+++ b/PaddleSlim/classification/infer.py
@@ -34,20 +34,24 @@ add_arg('model_name', str, "__model__.infer", "inference model filename")
 add_arg('params_name', str, "__params__", "inference model params filename")
 # yapf: enable
 
+
 def infer(args):
     # parameters from arguments
     place = fluid.CUDAPlace(0) if args.use_gpu else fluid.CPUPlace()
     exe = fluid.Executor(place)
-    test_program, feed_target_names, fetch_targets = fluid.io.load_inference_model(args.model_path,
-                                                 exe,
-                                                 model_filename=args.model_name,
-                                                 params_filename=args.params_name)
+    test_program, feed_target_names, fetch_targets = fluid.io.load_inference_model(
+        args.model_path,
+        exe,
+        model_filename=args.model_name,
+        params_filename=args.params_name)
     test_reader = paddle.batch(reader.test(), batch_size=1)
-    feeder = fluid.DataFeeder(place=place, feed_list=feed_target_names, program=test_program)
+    feeder = fluid.DataFeeder(
+        place=place, feed_list=feed_target_names, program=test_program)
+
+    results = []
 
-    results=[]
     #for infer time, if you don't need, please change infer_time to False
     infer_time = True
     compile_prog = fluid.compiler.CompiledProgram(test_program)
@@ -58,28 +62,27 @@ def infer(args):
             repeats_time = 100
             feed_data = feeder.feed(data)
             for i in range(warmup_times):
-                exe.run(compile_prog,
-                        feed=feed_data,
-                        fetch_list=fetch_targets)
+                exe.run(compile_prog, feed=feed_data, fetch_list=fetch_targets)
             start_time = time.time()
             for i in range(repeats_time):
-                exe.run(compile_prog,
-                        feed=feed_data,
-                        fetch_list=fetch_targets)
+                exe.run(compile_prog, feed=feed_data, fetch_list=fetch_targets)
-            print("infer time: {} ms/sample".format((time.time()-start_time) * 1000 / repeats_time))
+            print("infer time: {} ms/sample".format((time.time() - start_time) *
+                                                    1000 / repeats_time))
             infer_time = False
 
         # top1_acc, top5_acc
         result = exe.run(compile_prog,
-                        feed=feeder.feed(data),
-                        fetch_list=fetch_targets)
+                         feed=feeder.feed(data),
+                         fetch_list=fetch_targets)
         result = np.array(result[0])
-        print(result.argsort(axis=1)[:,-1:][::-1])
+        print(result.argsort(axis=1)[:, -1:][::-1])
         sys.stdout.flush()
 
+
 def main():
     args = parser.parse_args()
     print_arguments(args)
     infer(args)
 
+
 if __name__ == '__main__':
     main()
diff --git a/PaddleSlim/classification/quantization/compress.py b/PaddleSlim/classification/quantization/compress.py
index 4b4f0f2e..88c8d72c 100644
--- a/PaddleSlim/classification/quantization/compress.py
+++ b/PaddleSlim/classification/quantization/compress.py
@@ -38,7 +38,8 @@ def compress(args):
     image_shape = "3,224,224"
     image_shape = [int(m) for m in image_shape.split(",")]
 
-    image = fluid.data(name='image', shape=[None] + image_shape, dtype='float32')
+    image = fluid.data(
+        name='image', shape=[None] + image_shape, dtype='float32')
     label = fluid.data(name='label', shape=[None, 1], dtype='int64')
     # model definition
     model = models.__dict__[args.model]()
@@ -99,6 +100,7 @@ def compress(args):
         distiller_optimizer=None)
     com_pass.config(args.config_file)
     com_pass.run()
+
     conv_op_num = 0
     fake_quant_op_num = 0
     for op in com_pass.context.eval_graph.ops():
@@ -110,7 +112,6 @@ def compress(args):
 
     print('fake quant op num {}'.format(fake_quant_op_num))
 
-
 def main():
     args = parser.parse_args()
     print_arguments(args)
diff --git a/PaddleSlim/classification/quantization/freeze.py b/PaddleSlim/classification/quantization/freeze.py
index 6e97e024..fdff8fb9 100644
--- a/PaddleSlim/classification/quantization/freeze.py
+++ b/PaddleSlim/classification/quantization/freeze.py
@@ -45,27 +45,28 @@ add_arg('save_path', str, './output', 'Path to save inference model')
 add_arg('weight_quant_type', str, 'abs_max', 'quantization type for weight')
 # yapf: enable
 
+
 def eval(args):
     # parameters from arguments
     place = fluid.CUDAPlace(0) if args.use_gpu else fluid.CPUPlace()
     exe = fluid.Executor(place)
-    val_program, feed_names, fetch_targets = fluid.io.load_inference_model(args.model_path,
-                                                 exe,
-                                                 model_filename="__model__.infer",
-                                                 params_filename="__params__")
+    val_program, feed_names, fetch_targets = fluid.io.load_inference_model(
+        args.model_path,
+        exe,
+        model_filename="__model__.infer",
+        params_filename="__params__")
     val_reader = paddle.batch(reader.val(), batch_size=128)
-    feeder = fluid.DataFeeder(place=place, feed_list=feed_names, program=val_program)
+    feeder = fluid.DataFeeder(
+        place=place, feed_list=feed_names, program=val_program)
 
-    results=[]
+    results = []
     for batch_id, data in enumerate(val_reader()):
         image = [[d[0]] for d in data]
         label = [[d[1]] for d in data]
         feed_data = feeder.feed(image)
-        pred = exe.run(val_program,
-                      feed=feed_data,
-                      fetch_list=fetch_targets)
+        pred = exe.run(val_program, feed=feed_data, fetch_list=fetch_targets)
         pred = np.array(pred[0])
         label = np.array(label)
         sort_array = pred.argsort(axis=1)
@@ -82,56 +83,58 @@ def eval(args):
     result = np.mean(np.array(results), axis=0)
     print("top1_acc/top5_acc= {}".format(result))
     sys.stdout.flush()
+
     _logger.info("freeze the graph for inference")
     test_graph = IrGraph(core.Graph(val_program.desc), for_test=True)
 
     freeze_pass = QuantizationFreezePass(
-            scope=fluid.global_scope(),
-            place=place,
-            weight_quantize_type=args.weight_quant_type)
+        scope=fluid.global_scope(),
+        place=place,
+        weight_quantize_type=args.weight_quant_type)
     freeze_pass.apply(test_graph)
     server_program = test_graph.to_program()
     fluid.io.save_inference_model(
-            dirname=os.path.join(args.save_path, 'float'),
-            feeded_var_names=feed_names,
-            target_vars=fetch_targets,
-            executor=exe,
-            main_program=server_program,
-            model_filename='model',
-            params_filename='weights')
+        dirname=os.path.join(args.save_path, 'float'),
+        feeded_var_names=feed_names,
+        target_vars=fetch_targets,
+        executor=exe,
+        main_program=server_program,
+        model_filename='model',
+        params_filename='weights')
 
     _logger.info("convert the weights into int8 type")
     convert_int8_pass = ConvertToInt8Pass(
-            scope=fluid.global_scope(),
-            place=place)
+        scope=fluid.global_scope(), place=place)
     convert_int8_pass.apply(test_graph)
     server_int8_program = test_graph.to_program()
     fluid.io.save_inference_model(
-            dirname=os.path.join(args.save_path, 'int8'),
-            feeded_var_names=feed_names,
-            target_vars=fetch_targets,
-            executor=exe,
-            main_program=server_int8_program,
-            model_filename='model',
-            params_filename='weights')
+        dirname=os.path.join(args.save_path, 'int8'),
+        feeded_var_names=feed_names,
+        target_vars=fetch_targets,
+        executor=exe,
+        main_program=server_int8_program,
+        model_filename='model',
+        params_filename='weights')
 
     _logger.info("convert the freezed pass to paddle-lite execution")
     mobile_pass = TransformForMobilePass()
     mobile_pass.apply(test_graph)
     mobile_program = test_graph.to_program()
     fluid.io.save_inference_model(
-            dirname=os.path.join(args.save_path, 'mobile'),
-            feeded_var_names=feed_names,
-            target_vars=fetch_targets,
-            executor=exe,
-            main_program=mobile_program,
-            model_filename='model',
-            params_filename='weights')
+        dirname=os.path.join(args.save_path, 'mobile'),
+        feeded_var_names=feed_names,
+        target_vars=fetch_targets,
+        executor=exe,
+        main_program=mobile_program,
+        model_filename='model',
+        params_filename='weights')
 
+
 def main():
     args = parser.parse_args()
     print_arguments(args)
     eval(args)
 
+
 if __name__ == '__main__':
     main()
-- 
GitLab