From d9fa688a6154f7df4fdacae445937f16027418f2 Mon Sep 17 00:00:00 2001
From: Guanghua Yu <742925032@qq.com>
Date: Mon, 17 Oct 2022 20:44:59 +0800
Subject: [PATCH] update dygraph new format QAT API (#1464)

---
 demo/dygraph/quant/train.py     |  5 ++--
 paddleslim/dygraph/quant/qat.py | 44 ++++++++++++++++++++-------------
 2 files changed, 30 insertions(+), 19 deletions(-)

diff --git a/demo/dygraph/quant/train.py b/demo/dygraph/quant/train.py
index 202b3eb4..31fc67be 100644
--- a/demo/dygraph/quant/train.py
+++ b/demo/dygraph/quant/train.py
@@ -165,6 +165,8 @@ def compress(args):
         'moving_rate': 0.9,
         # for dygraph quantization, layers of type in quantizable_layer_type will be quantized
         'quantizable_layer_type': ['Conv2D', 'Linear'],
+        # Whether to export the quantized model in ONNX format.
+        'onnx_format': args.onnx_format,
     }
 
     if args.use_pact:
@@ -360,8 +362,7 @@ def compress(args):
         input_spec=[
             paddle.static.InputSpec(
                 shape=[None, 3, 224, 224], dtype='float32')
-        ],
-        onnx_format=args.onnx_format)
+        ])
 
 
 def main():
diff --git a/paddleslim/dygraph/quant/qat.py b/paddleslim/dygraph/quant/qat.py
index 34b1ae7b..05b1b40d 100644
--- a/paddleslim/dygraph/quant/qat.py
+++ b/paddleslim/dygraph/quant/qat.py
@@ -58,6 +58,8 @@ _quant_config_default = {
     'quantizable_layer_type': ['Conv2D', 'Linear'],
     # whether fuse conv and bn before QAT
     'fuse_conv_bn': False,
+    # Whether to export the quantized model in ONNX format. Default is False.
+    'onnx_format': False,
 }
 
 
@@ -215,7 +217,9 @@ class QAT(object):
                 weight_preprocess_layer=self.weight_preprocess,
                 act_preprocess_layer=self.act_preprocess,
                 weight_quantize_layer=self.weight_quantize,
-                act_quantize_layer=self.act_quantize)
+                act_quantize_layer=self.act_quantize,
+                onnx_format=self.config['onnx_format'],  # support Paddle >= 2.4
+            )
         except:
             self.imperative_qat = ImperativeQuantAware(
                 weight_bits=self.config['weight_bits'],
@@ -257,11 +261,7 @@ class QAT(object):
 
         return quant_model
 
-    def save_quantized_model(self,
-                             model,
-                             path,
-                             input_spec=None,
-                             onnx_format=False):
+    def save_quantized_model(self, model, path, input_spec=None):
         """
         Save the quantized inference model.
 
@@ -287,20 +287,30 @@ class QAT(object):
         model.eval()
 
         self.imperative_qat.save_quantized_model(
-            layer=model,
-            path=path,
-            input_spec=input_spec,
-            onnx_format=onnx_format)
+            layer=model, path=path, input_spec=input_spec)
 
     def _remove_preprocess(self, model):
         state_dict = model.state_dict()
-
-        self.imperative_qat = ImperativeQuantAware(
-            weight_bits=self.config['weight_bits'],
-            activation_bits=self.config['activation_bits'],
-            weight_quantize_type=self.config['weight_quantize_type'],
-            activation_quantize_type=self.config['activation_quantize_type'],
-            moving_rate=self.config['moving_rate'],
-            quantizable_layer_type=self.config['quantizable_layer_type'])
+        try:
+            self.imperative_qat = ImperativeQuantAware(
+                weight_bits=self.config['weight_bits'],
+                activation_bits=self.config['activation_bits'],
+                weight_quantize_type=self.config['weight_quantize_type'],
+                activation_quantize_type=self.config[
+                    'activation_quantize_type'],
+                moving_rate=self.config['moving_rate'],
+                quantizable_layer_type=self.config['quantizable_layer_type'],
+                onnx_format=self.config['onnx_format'],  # support Paddle >= 2.4
+            )
+        except:
+            self.imperative_qat = ImperativeQuantAware(
+                weight_bits=self.config['weight_bits'],
+                activation_bits=self.config['activation_bits'],
+                weight_quantize_type=self.config['weight_quantize_type'],
+                activation_quantize_type=self.config[
+                    'activation_quantize_type'],
+                moving_rate=self.config['moving_rate'],
+                quantizable_layer_type=self.config['quantizable_layer_type'])
+
         with paddle.utils.unique_name.guard():
             if hasattr(model, "_layers"):
--
GitLab
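
Usage note: after this patch, 'onnx_format' is passed through the quantization config dict rather than as an argument to save_quantized_model, and QAT falls back to the older ImperativeQuantAware signature on Paddle versions without that parameter (< 2.4). The sketch below shows the updated call pattern under two assumptions not stated in the patch: QAT is importable from the paddleslim package root, and paddle.vision.models.mobilenet_v1 serves as a stand-in model; the training loop is elided.

    # Minimal sketch of the config-based 'onnx_format' switch (assumptions noted above).
    import paddle
    from paddleslim import QAT

    quant_config = {
        # 'onnx_format' now lives in the config dict; QAT falls back to the
        # old ImperativeQuantAware signature on Paddle < 2.4.
        'onnx_format': True,
        'quantizable_layer_type': ['Conv2D', 'Linear'],
    }

    # Stand-in model for illustration only.
    model = paddle.vision.models.mobilenet_v1(pretrained=False)
    quanter = QAT(config=quant_config)
    quant_model = quanter.quantize(model)

    # ... quantization-aware training on quant_model goes here ...

    # No onnx_format argument here anymore; the config above controls it.
    quanter.save_quantized_model(
        quant_model,
        './quant_inference_model',
        input_spec=[
            paddle.static.InputSpec(
                shape=[None, 3, 224, 224], dtype='float32')
        ])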