diff --git a/python/paddle/fluid/contrib/slim/quantization/post_training_quantization.py b/python/paddle/fluid/contrib/slim/quantization/post_training_quantization.py
index 5996e752c8c22da72d7bc177b0fcefd8669714b4..06f3f5f3afa7505f2582abbabcda342f70c2fd6d 100644
--- a/python/paddle/fluid/contrib/slim/quantization/post_training_quantization.py
+++ b/python/paddle/fluid/contrib/slim/quantization/post_training_quantization.py
@@ -578,6 +578,7 @@ class PostTrainingQuantization(object):
             var_tensor = _load_variable_data(self._scope, var_name)
             var_tensor = var_tensor.flatten()
             abs_max_value = float(np.max(np.abs(var_tensor)))
+            abs_max_value = 1e-8 if abs_max_value == 0.0 else abs_max_value
             s = 0.3
             if var_name not in self._best_mse_loss:
                 self._best_mse_loss[var_name] = float('inf')
diff --git a/python/paddle/fluid/contrib/slim/quantization/quantization_pass.py b/python/paddle/fluid/contrib/slim/quantization/quantization_pass.py
index b3b12a477e2a0a85dbc4889ab3d864f08a801791..9917730daa543f2ba5a9aff5cd4afefeb46123ae 100644
--- a/python/paddle/fluid/contrib/slim/quantization/quantization_pass.py
+++ b/python/paddle/fluid/contrib/slim/quantization/quantization_pass.py
@@ -1312,6 +1312,7 @@ class QuantizationFreezePass(object):
                 assert self._is_float(
                     scale_v), 'The scale of parameter %s is not a float.' % (
                         original_var_name)
+                scale_v = 1e-8 if scale_v == 0.0 else scale_v
                 max_range *= param_range / scale_v
             else:
                 max_range *= act_range
@@ -1413,6 +1414,7 @@ class QuantizationFreezePass(object):
                 x[:, i] = _clip(x[:, i], s)
                 x[:, i] = np.round(x[:, i] / s * bnt)
             else:
+                scale = 1e-8 if scale == 0.0 else scale
                 x = _clip(x, scale)
                 x = np.round(x / scale * bnt)
         return x
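
For context, all three hunks guard the same failure mode: a tensor (or a frozen parameter scale) that is identically zero yields a quantization scale of 0.0, and the later division by that scale produces NaN/inf instead of quantized values. Below is a minimal standalone sketch of the problem; the helper name `quantize` and the clipping logic are illustrative stand-ins, not Paddle's actual `_quant`/`_clip` helpers.

```python
import numpy as np

def quantize(x, scale, bnt=127):
    # Mirrors the pattern in the patched code: clip to [-scale, scale],
    # then map onto the signed integer range [-bnt, bnt].
    x = np.clip(x, -scale, scale)
    return np.round(x / scale * bnt)

x = np.zeros(4, dtype=np.float32)
scale = float(np.max(np.abs(x)))   # 0.0 for an all-zero tensor

# Without the guard: 0.0 / 0.0 -> nan (plus a RuntimeWarning).
with np.errstate(invalid='ignore'):
    print(quantize(x, scale))      # [nan nan nan nan]

# With the guard from the patch: a tiny positive floor keeps the math finite.
scale = 1e-8 if scale == 0.0 else scale
print(quantize(x, scale))          # [0. 0. 0. 0.]
```

The 1e-8 floor leaves nonzero scales untouched and maps an all-zero tensor to all-zero quantized values, which is the intended result.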