From 32210b08c8c7bc49762223c5ac7d205393dad00e Mon Sep 17 00:00:00 2001
From: pangyoki
Date: Sun, 27 Sep 2020 11:38:12 +0000
Subject: [PATCH] change tolerance of Normal log_prob method

---
 python/paddle/fluid/tests/unittests/test_distribution.py | 9 ++++++++-
 1 file changed, 8 insertions(+), 1 deletion(-)

diff --git a/python/paddle/fluid/tests/unittests/test_distribution.py b/python/paddle/fluid/tests/unittests/test_distribution.py
index 2946af2f4b7..bfc128e85e9 100644
--- a/python/paddle/fluid/tests/unittests/test_distribution.py
+++ b/python/paddle/fluid/tests/unittests/test_distribution.py
@@ -396,11 +396,18 @@ class NormalTest(unittest.TestCase):
         np_other_normal = NormalNumpy(self.other_loc_np, self.other_scale_np)
         np_kl = np_normal.kl_divergence(np_other_normal)
 
+        # The assign op does not support numpy.ndarray inputs whose dtype is FP64.
+        # When loc and scale are FP64 numpy.ndarray, we use the assign op to
+        # convert them to an FP32 Tensor and then the cast op to convert that
+        # back to an FP64 Tensor. This round trip loses precision, so relax
+        # the tolerance for log_prob from 1e-6 to 1e-4.
+        log_tolerance = 1e-4
+
         np.testing.assert_equal(sample.shape, np_sample.shape)
         np.testing.assert_allclose(
            entropy, np_entropy, rtol=tolerance, atol=tolerance)
         np.testing.assert_allclose(
-            log_prob, np_lp, rtol=tolerance, atol=tolerance)
+            log_prob, np_lp, rtol=log_tolerance, atol=log_tolerance)
         np.testing.assert_allclose(probs, np_p, rtol=tolerance, atol=tolerance)
         np.testing.assert_allclose(kl, np_kl, rtol=tolerance, atol=tolerance)
-- 
GitLab
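
For context, a minimal sketch of why the FP64 -> FP32 -> FP64 round trip forces
a looser tolerance: float32 keeps roughly 7 significant decimal digits, and
cancellation in the (value - loc) term of the Normal log density can amplify
that rounding error well past 1e-6. The values below are illustrative, not
taken from the test.

import numpy as np

# Illustrative values (not from the test): a mean stored in FP64 and a sample
# close to it, where cancellation makes the FP32 rounding error visible.
loc = np.float64(1.2345678901234567)
value = loc + 1e-3

# FP64 -> FP32 -> FP64 round trip, mirroring the assign-then-cast conversion.
loc32 = np.float64(np.float32(loc))
print(abs(loc - loc32))  # absolute rounding error, on the order of 1e-8

# The squared-deviation term of the Normal log density amplifies the error:
# (value - loc) is only 1e-3, so a ~1e-8 shift in loc becomes a ~1e-5..1e-4
# relative error after squaring -- past the old 1e-6 bound, within 1e-4.
exact = (value - loc) ** 2
rounded = (value - loc32) ** 2
print(abs(exact - rounded) / exact)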