Unverified commit 90eb5541, authored by P pangyoki, committed by GitHub

fix the precision problem of test_distribution (#27524)

* increase tolerance

* increase the difference between low and high

* change tolerance of Normal log_prob method

* change probs tolerance to 1e-4

* change tolerance of Normal kl method
Parent 29d5937a
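The first group of changes widens the gap between `low` and `high` so that randomly drawn bounds can no longer produce an inverted interval by accident (the new `UniformTest9` still covers `high < low`, but deliberately). Below is a minimal NumPy sketch, not part of the commit, of why the old bounds were flaky: `low` drawn from `randn` routinely reaches past ±3, while `high` drawn from `uniform(-5.0, 5.0)` can land below it.

```python
import numpy as np

rng = np.random.default_rng(0)
low = rng.standard_normal((1000, 3)).astype('float32')

# old bounds: high can fall below low, yielding an invalid Uniform(low, high)
high_old = rng.uniform(-5.0, 5.0, (1000, 3)).astype('float32')
# new bounds: high is at least 5.0, far above any realistic randn draw
high_new = rng.uniform(5.0, 15.0, (1000, 3)).astype('float32')

print((high_old <= low).mean())  # a sizable fraction of pairs is invalid
print((high_new <= low).mean())  # effectively zero
```

A wider gap also keeps `high - low` safely away from zero, which would otherwise amplify floating-point error in interval-width terms such as `-log(high - low)`.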
@@ -123,7 +123,7 @@ class UniformTest(unittest.TestCase):
def init_numpy_data(self, batch_size, dims):
# low and high are 'float'
self.low_np = np.random.uniform(-2, 1)
-    self.high_np = np.random.uniform(1, 3)
+    self.high_np = np.random.uniform(2, 4)
self.values_np = np.array([1.0]).astype('float32')
def init_dynamic_data(self, batch_size, dims):
@@ -193,7 +193,7 @@ class UniformTest2(UniformTest):
def init_numpy_data(self, batch_size, dims):
# low and high are 'int'
self.low_np = int(np.random.uniform(-2, 1))
-    self.high_np = int(np.random.uniform(1, 3))
+    self.high_np = int(np.random.uniform(2, 4))
self.values_np = np.array([1.0]).astype('float32')
@@ -201,7 +201,7 @@ class UniformTest3(UniformTest):
def init_numpy_data(self, batch_size, dims):
# test broadcast: low is float, high is numpy.ndarray with dtype 'float32'.
self.low_np = np.random.uniform(-2, 1)
-    self.high_np = np.random.uniform(-5.0, 5.0,
+    self.high_np = np.random.uniform(5.0, 15.0,
(batch_size, dims)).astype('float32')
self.values_np = np.random.randn(batch_size, dims).astype('float32')
@@ -217,7 +217,7 @@ class UniformTest4(UniformTest):
def init_numpy_data(self, batch_size, dims):
# low and high are numpy.ndarray with dtype 'float32'.
self.low_np = np.random.randn(batch_size, dims).astype('float32')
-    self.high_np = np.random.uniform(-5.0, 5.0,
+    self.high_np = np.random.uniform(5.0, 15.0,
(batch_size, dims)).astype('float32')
self.values_np = np.random.randn(batch_size, dims).astype('float32')
@@ -233,7 +233,7 @@ class UniformTest5(UniformTest):
def init_numpy_data(self, batch_size, dims):
# low and high are numpy.ndarray with dtype 'float64'.
self.low_np = np.random.randn(batch_size, dims).astype('float64')
-    self.high_np = np.random.uniform(-5.0, 5.0,
+    self.high_np = np.random.uniform(5.0, 15.0,
(batch_size, dims)).astype('float64')
self.values_np = np.random.randn(batch_size, dims).astype('float64')
@@ -254,7 +254,7 @@ class UniformTest6(UniformTest):
def init_numpy_data(self, batch_size, dims):
# low and high are Tensor with dtype 'VarType.FP32'.
self.low_np = np.random.randn(batch_size, dims).astype('float32')
-    self.high_np = np.random.uniform(-5.0, 5.0,
+    self.high_np = np.random.uniform(5.0, 15.0,
(batch_size, dims)).astype('float32')
self.values_np = np.random.randn(batch_size, dims).astype('float32')
@@ -277,7 +277,7 @@ class UniformTest7(UniformTest):
def init_numpy_data(self, batch_size, dims):
# low and high are Tensor with dtype 'VarType.FP64'.
self.low_np = np.random.randn(batch_size, dims).astype('float64')
-    self.high_np = np.random.uniform(-5.0, 5.0,
+    self.high_np = np.random.uniform(5.0, 15.0,
(batch_size, dims)).astype('float64')
self.values_np = np.random.randn(batch_size, dims).astype('float64')
@@ -300,7 +300,7 @@ class UniformTest8(UniformTest):
def init_numpy_data(self, batch_size, dims):
# low and high are Tensor with dtype 'VarType.FP64'. value's dtype is 'VarType.FP32'.
self.low_np = np.random.randn(batch_size, dims).astype('float64')
-    self.high_np = np.random.uniform(-5.0, 5.0,
+    self.high_np = np.random.uniform(5.0, 15.0,
(batch_size, dims)).astype('float64')
self.values_np = np.random.randn(batch_size, dims).astype('float32')
@@ -319,6 +319,23 @@ class UniformTest8(UniformTest):
name='values', shape=[dims], dtype='float32')
+ class UniformTest9(UniformTest):
+     def init_numpy_data(self, batch_size, dims):
+         # low and high are numpy.ndarray with dtype 'float32'.
+         # high < low.
+         self.low_np = np.random.randn(batch_size, dims).astype('float32')
+         self.high_np = np.random.uniform(-10.0, -5.0,
+                                          (batch_size, dims)).astype('float32')
+         self.values_np = np.random.randn(batch_size, dims).astype('float32')
+
+     def init_static_data(self, batch_size, dims):
+         self.static_low = self.low_np
+         self.static_high = self.high_np
+         with fluid.program_guard(self.test_program):
+             self.static_values = layers.data(
+                 name='values', shape=[dims], dtype='float32')
class NormalTest(unittest.TestCase):
def setUp(self, use_gpu=False, batch_size=2, dims=3):
self.use_gpu = use_gpu
@@ -379,13 +396,22 @@ class NormalTest(unittest.TestCase):
np_other_normal = NormalNumpy(self.other_loc_np, self.other_scale_np)
np_kl = np_normal.kl_divergence(np_other_normal)
+ # Because the assign op does not accept numpy.ndarray input with FP64 dtype,
+ # FP64 loc and scale are first converted to FP32 Tensors via assign and then
+ # cast back to FP64 Tensors. This conversion loses precision, so the
+ # tolerance is relaxed from 1e-6 to 1e-4.
+ log_tolerance = 1e-4
np.testing.assert_equal(sample.shape, np_sample.shape)
np.testing.assert_allclose(
entropy, np_entropy, rtol=tolerance, atol=tolerance)
np.testing.assert_allclose(
-     log_prob, np_lp, rtol=tolerance, atol=tolerance)
- np.testing.assert_allclose(probs, np_p, rtol=tolerance, atol=tolerance)
- np.testing.assert_allclose(kl, np_kl, rtol=tolerance, atol=tolerance)
+     log_prob, np_lp, rtol=log_tolerance, atol=log_tolerance)
+ np.testing.assert_allclose(
+     probs, np_p, rtol=log_tolerance, atol=log_tolerance)
+ np.testing.assert_allclose(
+     kl, np_kl, rtol=log_tolerance, atol=log_tolerance)
def test_normal_distribution_dygraph(self, sample_shape=7, tolerance=1e-6):
paddle.disable_static(self.place)
......
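The comment added above explains the relaxed `log_tolerance`: FP64 `loc`/`scale` are routed through an FP32 `assign` and a `cast` back to FP64, so they retain only float32 precision (about seven significant decimal digits, machine epsilon ≈ 1.2e-7). Squaring and division inside `log_prob` and `kl_divergence` can then push the final error past the old 1e-6 bound. A self-contained NumPy sketch of the effect, using an illustrative log-density formula rather than the test's actual reference code:

```python
import numpy as np

rng = np.random.default_rng(0)
loc = rng.standard_normal(1000)
scale = np.abs(rng.standard_normal(1000)) + 0.5
value = rng.standard_normal(1000)

def normal_log_prob(value, loc, scale):
    # log N(value; loc, scale) = -(value - loc)^2 / (2 scale^2) - log(scale) - log(sqrt(2 pi))
    return (-((value - loc) ** 2) / (2.0 * scale ** 2)
            - np.log(scale) - 0.5 * np.log(2.0 * np.pi))

exact = normal_log_prob(value, loc, scale)

# simulate the assign (FP32) + cast (FP64) round trip of the parameters
loc_rt = loc.astype('float32').astype('float64')
scale_rt = scale.astype('float32').astype('float64')
approx = normal_log_prob(value, loc_rt, scale_rt)

# the round trip perturbs loc/scale at ~1e-7 relative precision; squared and
# divided terms can amplify that beyond 1e-6, typically staying under 1e-4
print(np.abs(approx - exact).max())
```

For reference, `np.testing.assert_allclose(actual, desired, rtol, atol)` passes when `|actual - desired| <= atol + rtol * |desired|`, so raising both `rtol` and `atol` to 1e-4 gives headroom for values both near and far from zero.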