diff --git a/paddle/fluid/inference/tests/api/trt_dynamic_shape_transformer_prune_test.cc b/paddle/fluid/inference/tests/api/trt_dynamic_shape_transformer_prune_test.cc
index 3916cf361c4b87602d9abc996788566da0488bbf..965e233b68cc0d0bf0944c60dbe4a591fe8e36b5 100644
--- a/paddle/fluid/inference/tests/api/trt_dynamic_shape_transformer_prune_test.cc
+++ b/paddle/fluid/inference/tests/api/trt_dynamic_shape_transformer_prune_test.cc
@@ -126,7 +126,7 @@ void trt_ernie(bool with_fp16, std::vector<float> result) {
   run(config, &out_data);
   for (size_t i = 0; i < out_data.size(); i++) {
-    EXPECT_NEAR(result[i], out_data[i], 1e-4);
+    EXPECT_NEAR(result[i], out_data[i], 2e-3);
   }
 }
diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_subgraph_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_subgraph_pass.py
index 8d19d036e825b6d51a85b98c4d87ce55aec46366..f26a565f611e85f5d99e47c92179372dcb1ecced 100644
--- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_subgraph_pass.py
+++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_subgraph_pass.py
@@ -308,7 +308,10 @@ class TensorRTSubgraphPassActivationTest(InferencePassTest):
             use_gpu = True
             if os.path.exists(self.path + "_opt_cache"):
                 shutil.rmtree(self.path + "_opt_cache")
-            self.check_output_with_option(use_gpu)
+            if self.trt_parameters.precision == AnalysisConfig.Precision.Float32:
+                self.check_output_with_option(use_gpu)
+            else:
+                self.check_output_with_option(use_gpu, 1e-3)
             self.assertTrue(
                 PassVersionChecker.IsCompatible('tensorrt_subgraph_pass'))
@@ -567,7 +570,7 @@ class TensorRTSubgraphPassDynamicSplitFp16SerializeTest(InferencePassTest):
             use_gpu = True
             if os.path.exists(self.path + "_opt_cache"):
                 shutil.rmtree(self.path + "_opt_cache")
-            self.check_output_with_option(use_gpu)
+            self.check_output_with_option(use_gpu, 1e-3)
             self.assertTrue(
                 PassVersionChecker.IsCompatible('tensorrt_subgraph_pass'))
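
Note (not part of the patch): the Python hunks above select the comparison tolerance based on the TensorRT precision mode. Below is a minimal standalone sketch of that pattern; the check_outputs helper, the 1e-5 FP32 default, and the sample arrays are hypothetical stand-ins (NumPy assumed), while the 1e-3 relaxed tolerance matches the value introduced by the patch.

import numpy as np

def check_outputs(reference, trt_output, precision="Float32"):
    """Compare TensorRT output against a reference with a precision-dependent
    absolute tolerance, mirroring the pattern used in test_trt_subgraph_pass.py."""
    # FP32 engines are expected to track the framework output closely; FP16
    # accumulates larger rounding error, so a looser absolute tolerance (1e-3,
    # as in the patch) is used instead. The 1e-5 FP32 value here is a stand-in,
    # not the real framework default.
    atol = 1e-5 if precision == "Float32" else 1e-3
    np.testing.assert_allclose(reference, trt_output, rtol=0.0, atol=atol)

if __name__ == "__main__":
    ref = np.array([0.10000, 0.20000, 0.30000], dtype=np.float32)
    out = np.array([0.10004, 0.20060, 0.29950], dtype=np.float32)  # FP16-like drift
    check_outputs(ref, out, precision="Half")  # passes only with the relaxed atol
    print("FP16-style comparison passed with atol=1e-3")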