From 7858d332f9c06ba5b322a512e1c97ceb4783fcb0 Mon Sep 17 00:00:00 2001
From: cc <52520497+juncaipeng@users.noreply.github.com>
Date: Fri, 9 Jul 2021 14:29:22 +0800
Subject: [PATCH] [dygraph qat] change default config and fix bug (#34047)

---
 .../slim/quantization/imperative/ptq.py       | 23 +++++++++++++------
 .../quantization/imperative/ptq_config.py     |  2 +-
 .../contrib/slim/tests/test_imperative_ptq.py |  3 ++-
 3 files changed, 20 insertions(+), 8 deletions(-)

diff --git a/python/paddle/fluid/contrib/slim/quantization/imperative/ptq.py b/python/paddle/fluid/contrib/slim/quantization/imperative/ptq.py
index b85a4b66375..3a536ab1d20 100644
--- a/python/paddle/fluid/contrib/slim/quantization/imperative/ptq.py
+++ b/python/paddle/fluid/contrib/slim/quantization/imperative/ptq.py
@@ -46,8 +46,8 @@ class ImperativePTQ(object):
         Args:
             quant_config(PTQConfig): the config of post training quantization.
                 The config has weight_quantizer and activation_quantizer.
-                In default, the weight_quantizer and activation_quantizer are
-                AbsmaxQuantizer.
+                In default, the weight_quantizer is PerChannelAbsmaxQuantizer
+                and the activation_quantizer is KLQuantizer.
         """
         super(ImperativePTQ, self).__init__()
 
@@ -70,9 +70,9 @@ class ImperativePTQ(object):
             "The model must be the instance of paddle.nn.Layer."
 
         if not inplace:
-            new_model = copy.deepcopy(model)
+            model = copy.deepcopy(model)
 
-        for name, layer in new_model.named_sublayers():
+        for name, layer in model.named_sublayers():
             if PTQRegistry.is_supported_layer(layer) \
                 and utils.is_leaf_layer(layer) \
                 and not self._is_skip_layer(layer):
@@ -90,13 +90,13 @@ class ImperativePTQ(object):
                 layer._forward_post_hooks.move_to_end(
                     quant_hook_handle._hook_id, last=False)
 
-        return new_model
+        return model
 
     def save_quantized_model(self, model, path, input_spec=None, **config):
         """
         1. Convert the quantized model
         2. Call jit.save to save the inference model
-        3. Load and postprocess the inference model.
+        3. Post process the inference model.
 
         Args:
             model (Layer): The model to be saved.
@@ -207,8 +207,19 @@ class ImperativePTQ(object):
         assert isinstance(model, paddle.nn.Layer), \
             "The input model must be the instance of paddle.nn.Layer."
 
+        total_num = 0
+        cur_num = 0
         for name, sub_layer in model.named_sublayers():
             if self._is_quant_layer(sub_layer):
+                total_num += 1
+
+        for name, sub_layer in model.named_sublayers():
+            if self._is_quant_layer(sub_layer):
+                cur_num += 1
+                if cur_num % 5 == 0:
+                    _logger.info("Process the %s / %s layer" %
+                                 (cur_num, total_num))
+
                 quant_config = sub_layer._quant_config
 
                 if quant_config.enable_in_act_quantizer:
diff --git a/python/paddle/fluid/contrib/slim/quantization/imperative/ptq_config.py b/python/paddle/fluid/contrib/slim/quantization/imperative/ptq_config.py
index 1d089b32181..384d2c704fd 100644
--- a/python/paddle/fluid/contrib/slim/quantization/imperative/ptq_config.py
+++ b/python/paddle/fluid/contrib/slim/quantization/imperative/ptq_config.py
@@ -53,4 +53,4 @@ class PTQConfig(object):
         self.enable_in_act_quantizer = False
 
 
-default_ptq_config = PTQConfig(AbsmaxQuantizer(), AbsmaxQuantizer())
+default_ptq_config = PTQConfig(KLQuantizer(), PerChannelAbsmaxQuantizer())
diff --git a/python/paddle/fluid/contrib/slim/tests/test_imperative_ptq.py b/python/paddle/fluid/contrib/slim/tests/test_imperative_ptq.py
index 24ae75456a0..575a91642a7 100644
--- a/python/paddle/fluid/contrib/slim/tests/test_imperative_ptq.py
+++ b/python/paddle/fluid/contrib/slim/tests/test_imperative_ptq.py
@@ -82,7 +82,8 @@ class TestImperativePTQ(unittest.TestCase):
         return data_cache_folder
 
     def set_vars(self):
-        self.ptq = ImperativePTQ(default_ptq_config)
+        config = PTQConfig(AbsmaxQuantizer(), AbsmaxQuantizer())
+        self.ptq = ImperativePTQ(config)
 
         self.batch_num = 10
         self.batch_size = 10
-- 
GitLab
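Usage note (illustration, not part of the patch): after this change, the default PTQConfig pairs KLQuantizer for activations with PerChannelAbsmaxQuantizer for weights. The sketch below shows a calibrate-then-save flow against the new defaults. It assumes only the signatures visible in the hunks above (quantize(model, inplace=False) and save_quantized_model(model, path, input_spec=...)); the import path and the PTQConfig argument order (activation quantizer first, matching the new default_ptq_config line) are assumptions, and the model and data are synthetic placeholders.

    # Minimal PTQ sketch under the assumptions stated above.
    import paddle
    from paddle.fluid.contrib.slim.quantization import (
        ImperativePTQ, PTQConfig, KLQuantizer, PerChannelAbsmaxQuantizer)

    # Same pairing as the new default_ptq_config: KL for activations,
    # per-channel absmax for weights.
    ptq = ImperativePTQ(PTQConfig(KLQuantizer(), PerChannelAbsmaxQuantizer()))

    model = paddle.nn.Sequential(paddle.nn.Conv2D(1, 6, 3), paddle.nn.ReLU())
    quant_model = ptq.quantize(model)  # inplace=False, so `model` is untouched

    # Run a few forward passes so the activation quantizers gather statistics;
    # the KL quantizer needs real activation histograms to pick thresholds.
    for _ in range(10):
        quant_model(paddle.rand([8, 1, 32, 32]))

    ptq.save_quantized_model(
        quant_model, path='./ptq_model',
        input_spec=[paddle.static.InputSpec([None, 1, 32, 32], 'float32')])

KL calibration generally yields tighter activation ranges than plain absmax at the cost of needing calibration batches, which is presumably why the test hunk above pins AbsmaxQuantizer explicitly rather than relying on the slower new defaults.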