From 9c214906ef5c4bee9a6db3cb9773cee0f47c4450 Mon Sep 17 00:00:00 2001
From: ceci3
Date: Tue, 17 May 2022 20:28:15 +0800
Subject: [PATCH] fix when params_filename is None (#1106)

---
 paddleslim/auto_compression/auto_strategy.py   | 24 ++++++++++++++-----
 paddleslim/auto_compression/compressor.py      |  4 ++++
 .../create_compressed_program.py               |  3 ++-
 .../auto_compression/strategy_config.py        |  5 +++--
 4 files changed, 27 insertions(+), 9 deletions(-)

diff --git a/paddleslim/auto_compression/auto_strategy.py b/paddleslim/auto_compression/auto_strategy.py
index 2a88058c..2e8ef96a 100644
--- a/paddleslim/auto_compression/auto_strategy.py
+++ b/paddleslim/auto_compression/auto_strategy.py
@@ -71,14 +71,26 @@ def create_strategy_config(strategy_str, model_type):
     dis_config = Distillation()
 
     if len(tmp_s) == 3:
+        ### TODO(ceci3): choose prune algo automatically
+        if 'prune' in tmp_s[0]:
+            ### default prune config
+            default_prune_config = {
+                'pruned_ratio': float(tmp_s[1]),
+                'prune_algo': 'prune',
+                'criterion': 'l1_norm'
+            }
+        else:
+            ### default unstructured prune config
+            default_prune_config = {
+                'prune_strategy':
+                'gmp',  ### default unstructured prune strategy is gmp
+                'prune_mode': 'ratio',
+                'pruned_ratio': float(tmp_s[1]),
+                'local_sparsity': True,
+                'prune_params_type': 'conv1x1_only'
+            }
         tmp_s[0] = tmp_s[0].replace('prune', 'Prune')
         tmp_s[0] = tmp_s[0].replace('sparse', 'UnstructurePrune')
-        ### TODO(ceci3): auto choose prune algo
-        default_prune_config = {
-            'pruned_ratio': float(tmp_s[1]),
-            'prune_algo': 'prune',
-            'criterion': 'l1_norm'
-        }
         if model_type == 'transformer' and tmp_s[0] == 'Prune':
             default_prune_config['prune_algo'] = 'transformer_pruner'
         prune_config = eval(tmp_s[0])(**default_prune_config)
diff --git a/paddleslim/auto_compression/compressor.py b/paddleslim/auto_compression/compressor.py
index fcfaec8c..a6641b34 100644
--- a/paddleslim/auto_compression/compressor.py
+++ b/paddleslim/auto_compression/compressor.py
@@ -97,7 +97,11 @@ class AutoCompression:
             deploy_hardware(str, optional): The hardware you want to deploy. Default: 'gpu'.
""" self.model_dir = model_dir + if model_filename == 'None': + model_filename = None self.model_filename = model_filename + if params_filename == 'None': + params_filename = None self.params_filename = params_filename base_path = os.path.basename(os.path.normpath(save_dir)) parent_path = os.path.abspath(os.path.join(save_dir, os.pardir)) diff --git a/paddleslim/auto_compression/create_compressed_program.py b/paddleslim/auto_compression/create_compressed_program.py index f2d52156..c61a2558 100644 --- a/paddleslim/auto_compression/create_compressed_program.py +++ b/paddleslim/auto_compression/create_compressed_program.py @@ -100,7 +100,8 @@ def _load_program_and_merge(executor, feed_target_names=None): scope = paddle.static.global_scope() new_scope = paddle.static.Scope() - print(model_dir, model_filename, params_filename) + if params_filename == 'None': + params_filename = None try: with paddle.static.scope_guard(new_scope): [teacher_program, teacher_feed_target_names, teacher_fetch_targets]= paddle.fluid.io.load_inference_model( \ diff --git a/paddleslim/auto_compression/strategy_config.py b/paddleslim/auto_compression/strategy_config.py index 126884d7..dfc96980 100644 --- a/paddleslim/auto_compression/strategy_config.py +++ b/paddleslim/auto_compression/strategy_config.py @@ -34,8 +34,9 @@ Quantization = namedtuple( "weight_quantize_type" ]) -Quantization.__new__.__defaults__ = (None, ) * (len(Quantization._fields) - 1 - ) + (False, ) +Quantization.__new__.__defaults__ = (None, ) * ( + len(Quantization._fields) - 3) + (False, 'moving_average_abs_max', + 'channel_wise_abs_max') ### Distillation: Distillation = namedtuple( -- GitLab