From 6f1bd154bd3fd6daa5ce217e3af55449557d15a0 Mon Sep 17 00:00:00 2001
From: itminner <397809320@qq.com>
Date: Mon, 11 Nov 2019 17:31:31 +0800
Subject: [PATCH] change convert api return

---
 paddleslim/quant/quanter.py | 12 +++++-------
 1 file changed, 5 insertions(+), 7 deletions(-)

diff --git a/paddleslim/quant/quanter.py b/paddleslim/quant/quanter.py
index 0bcf50d0..b2e29ae1 100644
--- a/paddleslim/quant/quanter.py
+++ b/paddleslim/quant/quanter.py
@@ -26,7 +26,7 @@ WEIGHT_QUANTIZATION_TYPES=['abs_max', 'channel_wise_abs_max']
 ACTIVATION_QUANTIZATION_TYPES=['abs_max','range_abs_max', 'moving_average_abs_max']
 VALID_DTYPES = ['int8']
 
-quant_config_default = {
+_quant_config_default = {
     # weight quantize type, default is 'abs_max'
     'weight_quantize_type': 'abs_max',
     # activation quantize type, default is 'abs_max'
@@ -59,7 +59,7 @@ def _parse_configs(user_config):
         configs(dict): final configs will be used.
     """
 
-    configs = copy.deepcopy(quant_config_default)
+    configs = copy.deepcopy(_quant_config_default)
     configs.update(user_config)
 
     # check configs is valid
@@ -209,13 +209,11 @@ def convert(program, scope, place, config, save_int8=False):
         weight_quantize_type=config['weight_quantize_type'])
     freeze_pass.apply(test_graph)
     freezed_program = test_graph.to_program()
-    freezed_program_int8 = None
     if save_int8:
         convert_int8_pass = ConvertToInt8Pass(scope=fluid.global_scope(),
                                               place=place)
         convert_int8_pass.apply(test_graph)
         freezed_program_int8 = test_graph.to_program()
-
-    return freezed_program, freezed_program_int8
-
-
+        return freezed_program, freezed_program_int8
+    else:
+        return freezed_program
--
GitLab
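
Caller-side sketch (illustration only, not part of the patch): it shows how the changed return convention of `convert` affects user code, assuming a test program, scope, place, and quant config prepared by the usual quant-aware flow, and assuming `convert` is exported from `paddleslim.quant` as in that era's docs. `val_program` and `quant_config` are hypothetical placeholders.

# Hypothetical usage sketch; `val_program` and `quant_config` are assumed to
# already exist (e.g. from the quant-aware training setup).
import paddle.fluid as fluid
from paddleslim.quant import convert

place = fluid.CPUPlace()

# Before this patch, convert() always returned a 2-tuple whose second element
# was None when save_int8 was False. After this patch, with save_int8=False
# (the default) it returns just the frozen float inference program.
float_prog = convert(val_program, fluid.global_scope(), place, quant_config)

# With save_int8=True it still returns a 2-tuple: the frozen float program and
# the variant whose weights are converted to int8.
float_prog, int8_prog = convert(
    val_program, fluid.global_scope(), place, quant_config, save_int8=True)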