From e3217e3eb1c9c6a1cf807f57c6d8b15e937e59cf Mon Sep 17 00:00:00 2001
From: Vvsmile <17864154871@163.com>
Date: Thu, 30 Mar 2023 15:28:41 +0800
Subject: [PATCH] [AMP OP&Test] Modify the FP16 and BF16 OpTest of Add_N (#52311)

* adjust default tolerance of output and grad

* fix a bug in the grad of OpTest

* fix the type of setting default value in optest, both forward and backward

* add default

* fix test_sum_op

* fix test_sum_op test for testing add_n

* modify the add_n op_test
---
 python/paddle/fluid/tests/unittests/test_sum_op.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/python/paddle/fluid/tests/unittests/test_sum_op.py b/python/paddle/fluid/tests/unittests/test_sum_op.py
index 7845434133d..4678161a059 100644
--- a/python/paddle/fluid/tests/unittests/test_sum_op.py
+++ b/python/paddle/fluid/tests/unittests/test_sum_op.py
@@ -297,14 +297,14 @@ class TestFP16SumOp(TestSumOp):
     def test_check_output(self):
         place = core.CUDAPlace(0)
         if core.is_float16_supported(place):
-            self.check_output_with_place(place, atol=2e-2)
+            self.check_output_with_place(place)
 
     # FIXME: Because of the precision fp16, max_relative_error
     # should be 0.15 here.
     def test_check_grad(self):
         place = core.CUDAPlace(0)
         if core.is_float16_supported(place):
-            self.check_grad(['x0'], 'Out', max_relative_error=0.15)
+            self.check_grad(['x0'], 'Out')
 
 
 def create_test_sum_fp16_class(parent):
-- 
GitLab
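
For context on why the explicit atol=2e-2 and max_relative_error=0.15 can be dropped: the test now relies on the test framework supplying a dtype-appropriate default tolerance when the caller passes none. Below is a minimal, self-contained sketch of that idea using only NumPy; the helper name check_close, the table of default values, and the fallback behaviour are illustrative assumptions, not Paddle's actual OpTest internals.

import numpy as np

# Hypothetical per-dtype absolute tolerances; Paddle's real defaults may differ.
_DEFAULT_ATOL = {
    np.dtype(np.float64): 1e-7,
    np.dtype(np.float32): 1e-6,
    np.dtype(np.float16): 1e-3,
}


def check_close(actual, expected, atol=None):
    """Compare two arrays, falling back to a dtype-based default atol
    when the caller does not pass one explicitly."""
    actual = np.asarray(actual)
    expected = np.asarray(expected)
    if atol is None:
        atol = _DEFAULT_ATOL.get(actual.dtype, 1e-6)
    np.testing.assert_allclose(actual, expected, rtol=0, atol=atol)


if __name__ == "__main__":
    x = np.random.rand(4).astype(np.float16)
    # A sub-resolution fp16 perturbation stays within the fp16 default atol.
    check_close(x + np.float16(5e-4), x)
    print("fp16 comparison passed with the dtype-based default tolerance")

The design point is that per-test tolerance overrides (such as the removed atol=2e-2) tend to mask real precision regressions, whereas a single dtype-based default keeps FP16/BF16 expectations consistent across operator tests.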