From bd4ce23e69e8ae1332a7f62140e891c44f984108 Mon Sep 17 00:00:00 2001
From: Leo Chen <39020268+leo0519@users.noreply.github.com>
Date: Thu, 8 Sep 2022 13:58:15 +0800
Subject: [PATCH] Increase the threshold of softmax and imperative qat UT
 (#45819)

---
 .../fluid/contrib/slim/tests/test_imperative_qat.py | 9 ++++++---
 .../slim/tests/test_imperative_qat_channelwise.py | 4 ++--
 .../fluid/contrib/slim/tests/test_imperative_qat_fuse.py | 2 +-
 .../unittests/ir/inference/test_trt_convert_softmax.py | 4 ++--
 4 files changed, 11 insertions(+), 8 deletions(-)

diff --git a/python/paddle/fluid/contrib/slim/tests/test_imperative_qat.py b/python/paddle/fluid/contrib/slim/tests/test_imperative_qat.py
index 2c18eff983..81411daae7 100644
--- a/python/paddle/fluid/contrib/slim/tests/test_imperative_qat.py
+++ b/python/paddle/fluid/contrib/slim/tests/test_imperative_qat.py
@@ -56,7 +56,10 @@ class TestImperativeQat(unittest.TestCase):
         self.activation_quantize_type = 'moving_average_abs_max'
         self.onnx_format = False
         self.check_export_model_accuracy = True
-        self.diff_threshold = 0.01
+        # The original model and the quantized model may give different predictions.
+        # There are 32 test samples and we allow at most one of them to differ.
+        # Hence, the diff_threshold is 1 / 32 = 0.03125.
+        self.diff_threshold = 0.03125
         self.fuse_conv_bn = False
 
     def func_qat(self):
@@ -207,7 +210,7 @@ class TestImperativeQat(unittest.TestCase):
         quant_acc = fluid.layers.accuracy(quant_out, label).numpy()
         paddle.enable_static()
         delta_value = fp32_acc - quant_acc
-        self.assertLess(delta_value, self.diff_threshold)
+        self.assertLessEqual(delta_value, self.diff_threshold)
 
     def test_qat(self):
         with _test_eager_guard():
@@ -221,7 +224,7 @@ class TestImperativeQatONNXFormat(unittest.TestCase):
         self.weight_quantize_type = 'abs_max'
         self.activation_quantize_type = 'moving_average_abs_max'
         self.onnx_format = True
-        self.diff_threshold = 0.025
+        self.diff_threshold = 0.03125
         self.fuse_conv_bn = False
 
 
diff --git a/python/paddle/fluid/contrib/slim/tests/test_imperative_qat_channelwise.py b/python/paddle/fluid/contrib/slim/tests/test_imperative_qat_channelwise.py
index 3770ee4864..f9fa636deb 100644
--- a/python/paddle/fluid/contrib/slim/tests/test_imperative_qat_channelwise.py
+++ b/python/paddle/fluid/contrib/slim/tests/test_imperative_qat_channelwise.py
@@ -43,7 +43,7 @@ class TestImperativeQatChannelWise(TestImperativeQat):
     def set_vars(self):
         self.weight_quantize_type = 'channel_wise_abs_max'
         self.activation_quantize_type = 'moving_average_abs_max'
-        self.diff_threshold = 0.01
+        self.diff_threshold = 0.03125
         self.onnx_format = False
         self.fuse_conv_bn = False
         print('weight_quantize_type', self.weight_quantize_type)
@@ -55,7 +55,7 @@ class TestImperativeQatChannelWiseONNXFormat(TestImperativeQat):
         self.weight_quantize_type = 'channel_wise_abs_max'
         self.activation_quantize_type = 'moving_average_abs_max'
         self.onnx_format = True
-        self.diff_threshold = 0.025
+        self.diff_threshold = 0.03125
         self.fuse_conv_bn = False
         print('weight_quantize_type', self.weight_quantize_type)
 
diff --git a/python/paddle/fluid/contrib/slim/tests/test_imperative_qat_fuse.py b/python/paddle/fluid/contrib/slim/tests/test_imperative_qat_fuse.py
index db7f15c4ce..4c491598d2 100644
--- a/python/paddle/fluid/contrib/slim/tests/test_imperative_qat_fuse.py
+++ b/python/paddle/fluid/contrib/slim/tests/test_imperative_qat_fuse.py
@@ -43,7 +43,7 @@ class TestImperativeQatfuseBN(TestImperativeQat):
     def set_vars(self):
         self.weight_quantize_type = 'abs_max'
         self.activation_quantize_type = 'moving_average_abs_max'
-        self.diff_threshold = 0.01
+        self.diff_threshold = 0.03125
         self.onnx_format = False
         self.fuse_conv_bn = True
 
diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_softmax.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_softmax.py
index b6cef5ca17..6a65382baf 100644
--- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_softmax.py
+++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_softmax.py
@@ -126,7 +126,7 @@ class TrtConvertSoftmaxTest(TrtLayerAutoScanTest):
             attrs, False), 1e-5
         self.trt_param.precision = paddle_infer.PrecisionType.Half
         yield self.create_inference_config(), generate_trt_nodes_num(
-            attrs, False), 1e-5
+            attrs, False), 1e-3
 
         # for dynamic_shape
         generate_dynamic_shape(attrs)
@@ -135,7 +135,7 @@ class TrtConvertSoftmaxTest(TrtLayerAutoScanTest):
             attrs, True), 1e-5
         self.trt_param.precision = paddle_infer.PrecisionType.Half
         yield self.create_inference_config(), generate_trt_nodes_num(
-            attrs, True), 1e-5
+            attrs, True), 1e-3
 
     def test(self):
         self.run_test()
-- 
GitLab