Unverified · Commit a8be9b6d · authored by zlsh80826 · committed by GitHub

Enhance eigh, eigvalsh unit tests (#40699)

* Enhance test_eigh_op

* Use eigen decomposition to validate eigen values and vectors
* Fix TestEighBatchAPI, which was not actually running on batched input

* Enhance test_eigvalsh_op

* Align the eigenvalue validation tolerance with cusolver's accuracy
* Fix the batch API test, which was not actually running on batched input

* Add abs for |d_ref|

* Remove comment
Parent eac23db1
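The motivation for the new validation helpers below: eigenvectors of a Hermitian matrix are only defined up to a sign (or a complex phase), so elementwise comparison against `np.linalg.eigh` output, even through `abs()`, is fragile across backends such as cusolver. The new helpers check the decomposition itself instead. A minimal standalone sketch of that residual check, using a hypothetical random symmetric matrix rather than the test fixtures:

```python
import numpy as np

# Standalone sketch (not from the commit): validate an eigendecomposition
# by reconstruction instead of comparing eigenvectors elementwise.
rng = np.random.default_rng(123)
A = rng.random((5, 5))
A = (A + A.T) / 2                     # symmetrize so eigh applies
w, V = np.linalg.eigh(A)

# ||A - V*diag(w)*V^-1|| / (N*||A||) should be tiny for a valid result
residual = A - V @ np.diag(w) @ np.linalg.inv(V)
rel_err = np.linalg.norm(residual, np.inf) / (5 * np.linalg.norm(A, np.inf))
assert rel_err < 1e-14                # FP64 tolerance used by the tests below

# ||I - V*V^-1|| / M checks that the eigenvector basis is well-conditioned
ortho_err = np.linalg.norm(np.eye(5) - V @ np.linalg.inv(V), np.inf) / 5
assert ortho_err < 1e-14
```

This is invariant to any per-vector sign or phase chosen by the backend, which is exactly why the tests switch to it.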
@@ -21,6 +21,53 @@ from op_test import OpTest
 from gradient_checker import grad_check
 
 
+def valid_eigh_result(A, eigh_value, eigh_vector, uplo):
+    assert A.ndim == 2 or A.ndim == 3
+
+    if A.ndim == 2:
+        valid_single_eigh_result(A, eigh_value, eigh_vector, uplo)
+        return
+
+    for batch_A, batch_w, batch_v in zip(A, eigh_value, eigh_vector):
+        valid_single_eigh_result(batch_A, batch_w, batch_v, uplo)
+
+
+def valid_single_eigh_result(A, eigh_value, eigh_vector, uplo):
+    FP32_MAX_RELATIVE_ERR = 5e-5
+    FP64_MAX_RELATIVE_ERR = 1e-14
+
+    if A.dtype == np.single or A.dtype == np.csingle:
+        rtol = FP32_MAX_RELATIVE_ERR
+    else:
+        rtol = FP64_MAX_RELATIVE_ERR
+
+    M, N = A.shape
+    triangular_func = np.tril if uplo == 'L' else np.triu
+
+    if not np.iscomplexobj(A):
+        # Reconstruct A by mirroring the referenced triangular part
+        A = triangular_func(A) + triangular_func(A, -1).T
+    else:
+        # Reconstruct A as a Hermitian matrix
+        A = triangular_func(A) + np.matrix(triangular_func(A, -1)).H
+
+    # Diagonal matrix of eigenvalues
+    T = np.diag(eigh_value)
+
+    # A = Q*T*Q'
+    residual = A - (eigh_vector @ T @ np.linalg.inv(eigh_vector))
+
+    # ||A - Q*T*Q'|| / (N*||A||) < rtol
+    np.testing.assert_array_less(
+        np.linalg.norm(residual, np.inf) / (N * np.linalg.norm(A, np.inf)),
+        rtol)
+
+    # ||I - Q*Q'|| / M < rtol
+    residual = np.eye(M) - eigh_vector @ np.linalg.inv(eigh_vector)
+    np.testing.assert_array_less(np.linalg.norm(residual, np.inf) / M, rtol)
+
+
 class TestEighOp(OpTest):
     def setUp(self):
         paddle.enable_static()
@@ -57,46 +104,34 @@ class TestEighGPUCase(unittest.TestCase):
     def setUp(self):
         self.x_shape = [32, 32]
         self.dtype = "float32"
+        self.UPLO = "L"
         np.random.seed(123)
         self.x_np = np.random.random(self.x_shape).astype(self.dtype)
-        if (paddle.version.cuda() >= "11.6"):
-            self.rtol = 5e-6
-            self.atol = 6e-5
-        else:
-            self.rtol = 1e-5
-            self.atol = 1e-5
 
     def test_check_output_gpu(self):
         if paddle.is_compiled_with_cuda():
            paddle.disable_static(place=paddle.CUDAPlace(0))
            input_real_data = paddle.to_tensor(self.x_np)
-            expected_w, expected_v = np.linalg.eigh(self.x_np)
-            actual_w, actual_v = paddle.linalg.eigh(input_real_data)
-            np.testing.assert_allclose(
-                actual_w, expected_w, rtol=self.rtol, atol=self.atol)
-            np.testing.assert_allclose(
-                abs(actual_v.numpy()),
-                abs(expected_v),
-                rtol=self.rtol,
-                atol=self.atol)
+            actual_w, actual_v = paddle.linalg.eigh(input_real_data, self.UPLO)
+            valid_eigh_result(self.x_np,
+                              actual_w.numpy(), actual_v.numpy(), self.UPLO)
 
 
 class TestEighAPI(unittest.TestCase):
     def setUp(self):
         self.init_input_data()
         self.UPLO = 'L'
-        if (paddle.version.cuda() >= "11.6"):
-            self.rtol = 5e-6
-            self.atol = 6e-5
-        else:
-            self.rtol = 1e-5
-            self.atol = 1e-5
+        self.rtol = 1e-5  # for test_eigh_grad
+        self.atol = 1e-5  # for test_eigh_grad
         self.place = paddle.CUDAPlace(0) if paddle.is_compiled_with_cuda() \
             else paddle.CPUPlace()
         np.random.seed(123)
 
-    def init_input_data(self):
+    def init_input_shape(self):
         self.x_shape = [5, 5]
+
+    def init_input_data(self):
+        self.init_input_shape()
         self.dtype = "float32"
         self.real_data = np.random.random(self.x_shape).astype(self.dtype)
         complex_data = np.random.random(self.x_shape).astype(
@@ -108,12 +143,6 @@ class TestEighAPI(unittest.TestCase):
         self.complex_symm = np.divide(
             complex_data + np.conj(complex_data.transpose(self.trans_dims)), 2)
 
-    def compare_result(self, actual_w, actual_v, expected_w, expected_v):
-        np.testing.assert_allclose(
-            actual_w, expected_w, rtol=self.rtol, atol=self.atol)
-        np.testing.assert_allclose(
-            abs(actual_v), abs(expected_v), rtol=self.rtol, atol=self.atol)
-
     def check_static_float_result(self):
         main_prog = paddle.static.Program()
         startup_prog = paddle.static.Program()
@@ -122,12 +151,10 @@
                 'input_x', shape=self.x_shape, dtype=self.dtype)
             output_w, output_v = paddle.linalg.eigh(input_x)
             exe = paddle.static.Executor(self.place)
-            expected_w, expected_v = exe.run(main_prog,
-                                             feed={"input_x": self.real_data},
-                                             fetch_list=[output_w, output_v])
-            actual_w, actual_v = np.linalg.eigh(self.real_data)
-            self.compare_result(actual_w, actual_v, expected_w, expected_v)
+            actual_w, actual_v = exe.run(main_prog,
+                                         feed={"input_x": self.real_data},
+                                         fetch_list=[output_w, output_v])
+            valid_eigh_result(self.real_data, actual_w, actual_v, self.UPLO)
 
     def check_static_complex_result(self):
         main_prog = paddle.static.Program()
@@ -138,12 +165,10 @@ class TestEighAPI(unittest.TestCase):
                 'input_x', shape=self.x_shape, dtype=x_dtype)
             output_w, output_v = paddle.linalg.eigh(input_x)
             exe = paddle.static.Executor(self.place)
-            expected_w, expected_v = exe.run(
-                main_prog,
-                feed={"input_x": self.complex_symm},
-                fetch_list=[output_w, output_v])
-            actual_w, actual_v = np.linalg.eigh(self.complex_symm)
-            self.compare_result(actual_w, actual_v, expected_w, expected_v)
+            actual_w, actual_v = exe.run(main_prog,
+                                         feed={"input_x": self.complex_symm},
+                                         fetch_list=[output_w, output_v])
+            valid_eigh_result(self.complex_symm, actual_w, actual_v, self.UPLO)
 
     def test_in_static_mode(self):
         paddle.enable_static()
@@ -153,14 +178,14 @@ class TestEighAPI(unittest.TestCase):
     def test_in_dynamic_mode(self):
         paddle.disable_static()
         input_real_data = paddle.to_tensor(self.real_data)
-        expected_w, expected_v = np.linalg.eigh(self.real_data)
         actual_w, actual_v = paddle.linalg.eigh(input_real_data)
-        self.compare_result(actual_w, actual_v.numpy(), expected_w, expected_v)
+        valid_eigh_result(self.real_data,
+                          actual_w.numpy(), actual_v.numpy(), self.UPLO)
 
         input_complex_data = paddle.to_tensor(self.complex_symm)
-        expected_w, expected_v = np.linalg.eigh(self.complex_symm)
         actual_w, actual_v = paddle.linalg.eigh(input_complex_data)
-        self.compare_result(actual_w, actual_v.numpy(), expected_w, expected_v)
+        valid_eigh_result(self.complex_symm,
+                          actual_w.numpy(), actual_v.numpy(), self.UPLO)
 
     def test_eigh_grad(self):
         paddle.disable_static()
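The second file, test_eigvalsh_op (named in the commit message), applies the same idea to eigenvalues only. Eigenvalues carry no sign or phase ambiguity, so direct comparison against `np.linalg.eigvalsh` stays valid; the change replaces fixed `assert_allclose` tolerances with a maximum relative error scaled by the largest reference eigenvalue. A sketch of that criterion, where the perturbed values stand in for operator output and are not from the commit:

```python
import numpy as np

# Sketch of the max-relative-error criterion (stand-in values, not from
# the commit): compare eigenvalues directly, scaled by the largest
# reference magnitude.
rng = np.random.default_rng(123)
A = rng.random((5, 5)).astype(np.float32)
A = (A + A.T) / 2
expected = np.linalg.eigvalsh(A)
# Pretend this came from the operator under test:
actual = expected + np.float32(1e-6) * rng.standard_normal(5).astype(np.float32)

rel_err = np.max(np.abs(expected - actual)) / np.max(np.abs(expected))
np.testing.assert_array_less(rel_err, 5e-5)   # FP32 tolerance from the tests
```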
@@ -21,6 +21,31 @@ from op_test import OpTest
 from gradient_checker import grad_check
 
 
+def compare_result(actual, expected):
+    assert actual.ndim == 1 or actual.ndim == 2
+
+    if actual.ndim == 1:
+        valid_eigenvalues(actual, expected)
+        return
+
+    for batch_actual, batch_expected in zip(actual, expected):
+        valid_eigenvalues(batch_actual, batch_expected)
+
+
+def valid_eigenvalues(actual, expected):
+    FP32_MAX_RELATIVE_ERR = 5e-5
+    FP64_MAX_RELATIVE_ERR = 1e-14
+    rtol = FP32_MAX_RELATIVE_ERR if actual.dtype == np.single \
+        else FP64_MAX_RELATIVE_ERR
+
+    diff = np.abs(expected - actual)
+    max_diff = np.max(diff)
+    max_ref = np.max(np.abs(expected))
+    relative_error = max_diff / max_ref
+    np.testing.assert_array_less(relative_error, rtol)
+
+
 class TestEigvalshOp(OpTest):
     def setUp(self):
         paddle.enable_static()
@@ -60,12 +85,6 @@ class TestEigvalshGPUCase(unittest.TestCase):
         self.dtype = "float32"
         np.random.seed(123)
         self.x_np = np.random.random(self.x_shape).astype(self.dtype)
-        if (paddle.version.cuda() >= "11.6"):
-            self.rtol = 5e-6
-            self.atol = 6e-5
-        else:
-            self.rtol = 1e-5
-            self.atol = 1e-5
 
     def test_check_output_gpu(self):
         if paddle.is_compiled_with_cuda():
@@ -73,26 +92,24 @@
             input_real_data = paddle.to_tensor(self.x_np)
             expected_w = np.linalg.eigvalsh(self.x_np)
             actual_w = paddle.linalg.eigvalsh(input_real_data)
-            np.testing.assert_allclose(
-                actual_w, expected_w, rtol=self.rtol, atol=self.atol)
+            compare_result(actual_w.numpy(), expected_w)
 
 
 class TestEigvalshAPI(unittest.TestCase):
     def setUp(self):
-        self.x_shape = [5, 5]
         self.dtype = "float32"
         self.UPLO = 'L'
-        if (paddle.version.cuda() >= "11.6"):
-            self.rtol = 5e-6
-            self.atol = 6e-5
-        else:
-            self.rtol = 1e-5
-            self.atol = 1e-5
+        self.rtol = 1e-5  # test_eigvalsh_grad
+        self.atol = 1e-5  # test_eigvalsh_grad
        self.place = paddle.CUDAPlace(0) if paddle.is_compiled_with_cuda() \
            else paddle.CPUPlace()
         np.random.seed(123)
+        self.init_input_shape()
         self.init_input_data()
 
+    def init_input_shape(self):
+        self.x_shape = [5, 5]
+
     def init_input_data(self):
         self.real_data = np.random.random(self.x_shape).astype(self.dtype)
         complex_data = np.random.random(self.x_shape).astype(
@@ -103,10 +120,6 @@ class TestEigvalshAPI(unittest.TestCase):
         self.complex_symm = np.divide(
             complex_data + np.conj(complex_data.transpose(self.trans_dims)), 2)
 
-    def compare_result(self, actual_w, expected_w):
-        np.testing.assert_allclose(
-            actual_w, expected_w, rtol=self.rtol, atol=self.atol)
-
     def check_static_float_result(self):
         main_prog = paddle.static.Program()
         startup_prog = paddle.static.Program()
@@ -115,12 +128,12 @@
                 'input_x', shape=self.x_shape, dtype=self.dtype)
             output_w = paddle.linalg.eigvalsh(input_x)
             exe = paddle.static.Executor(self.place)
-            expected_w = exe.run(main_prog,
-                                 feed={"input_x": self.real_data},
-                                 fetch_list=[output_w])
-            actual_w = np.linalg.eigvalsh(self.real_data)
-            self.compare_result(actual_w, expected_w[0])
+            actual_w = exe.run(main_prog,
+                               feed={"input_x": self.real_data},
+                               fetch_list=[output_w])
+            expected_w = np.linalg.eigvalsh(self.real_data)
+            compare_result(actual_w[0], expected_w)
 
     def check_static_complex_result(self):
         main_prog = paddle.static.Program()
@@ -131,11 +144,11 @@
                 'input_x', shape=self.x_shape, dtype=x_dtype)
             output_w = paddle.linalg.eigvalsh(input_x)
             exe = paddle.static.Executor(self.place)
-            expected_w = exe.run(main_prog,
-                                 feed={"input_x": self.complex_symm},
-                                 fetch_list=[output_w])
-            actual_w = np.linalg.eigvalsh(self.complex_symm)
-            self.compare_result(actual_w, expected_w[0])
+            actual_w = exe.run(main_prog,
+                               feed={"input_x": self.complex_symm},
+                               fetch_list=[output_w])
+            expected_w = np.linalg.eigvalsh(self.complex_symm)
+            compare_result(actual_w[0], expected_w)
 
     def test_in_static_mode(self):
         paddle.enable_static()
@@ -147,12 +160,12 @@
         input_real_data = paddle.to_tensor(self.real_data)
         expected_w = np.linalg.eigvalsh(self.real_data)
         actual_w = paddle.linalg.eigvalsh(input_real_data)
-        self.compare_result(actual_w, expected_w)
+        compare_result(actual_w.numpy(), expected_w)
 
         input_complex_symm = paddle.to_tensor(self.complex_symm)
        expected_w = np.linalg.eigvalsh(self.complex_symm)
        actual_w = paddle.linalg.eigvalsh(input_complex_symm)
-        self.compare_result(actual_w, expected_w)
+        compare_result(actual_w.numpy(), expected_w)
 
     def test_eigvalsh_grad(self):
         paddle.disable_static(self.place)