From ad6fee2fa8d8490fd8c45908eabd8fc66f5e4fe9 Mon Sep 17 00:00:00 2001
From: Bai Yifan
Date: Wed, 13 Jan 2021 19:07:17 +0800
Subject: [PATCH] fix quantize error in speical naming model (#30354)

---
 .../contrib/slim/quantization/imperative/qat.py | 17 ++++++++++++-----
 1 file changed, 12 insertions(+), 5 deletions(-)

diff --git a/python/paddle/fluid/contrib/slim/quantization/imperative/qat.py b/python/paddle/fluid/contrib/slim/quantization/imperative/qat.py
index 37f1a13e31b..26fa0f0d484 100644
--- a/python/paddle/fluid/contrib/slim/quantization/imperative/qat.py
+++ b/python/paddle/fluid/contrib/slim/quantization/imperative/qat.py
@@ -265,13 +265,20 @@ class ImperativeQuantAware(object):
             if hasattr(layer, "skip_quant") and layer.skip_quant == True:
                 continue
 
-            scopes = name.split('.')
-            target = scopes[-1]
+            last_idx = 0
+            idx = 0
             obj = model
             parent = model
-            for i in range(len(scopes) - 1):
-                obj = getattr(parent, scopes[i])
-                parent = obj
+
+            while idx < len(name):
+                if (name[idx] == '.'):
+                    if hasattr(parent, name[last_idx:idx]):
+                        obj = getattr(obj, name[last_idx:idx])
+                        parent = obj
+                        last_idx = idx + 1
+                idx += 1
+            target = name[last_idx:idx]
+
             quant_layer = self._get_quantized_counterpart(layer)
             setattr(quant_layer, "layer_name", layer.full_name())
             setattr(obj, target, quant_layer)
--
GitLab
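
Note: the traversal added by this patch scans the dotted sub-layer name character by character and only descends into an attribute when the segment accumulated so far actually exists on the current parent, so a sub-layer whose registered name itself contains a '.' is no longer split apart. Below is a minimal, self-contained sketch of that resolution logic; the Node class, the resolve() helper, and the 'conv2d.w_0' name are hypothetical illustrations, not part of PaddlePaddle's API.

class Node:
    """Bare container standing in for a layer object in this sketch."""
    pass


def resolve(model, name):
    # Mirror of the patched loop: walk the dotted path, descending only when
    # the segment seen so far is a real attribute of the current parent.
    last_idx = 0
    idx = 0
    obj = model
    parent = model
    while idx < len(name):
        if name[idx] == '.':
            if hasattr(parent, name[last_idx:idx]):
                obj = getattr(obj, name[last_idx:idx])
                parent = obj
                last_idx = idx + 1
        idx += 1
    # Whatever remains (possibly containing dots) is the attribute to replace.
    return obj, name[last_idx:idx]


# Hypothetical usage: 'conv2d.w_0' is a single attribute whose name contains a dot.
model = Node()
model.block = Node()
setattr(model.block, 'conv2d.w_0', object())
container, target = resolve(model, 'block.conv2d.w_0')
assert container is model.block and target == 'conv2d.w_0'

In this toy example, the old split('.')-based code would have produced the segments ['block', 'conv2d', 'w_0'] and raised on getattr(parent, 'conv2d'), which is the kind of failure the patch addresses.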