Unverified commit 976961de, authored by chentianyu03, committed by GitHub

fix random failed of complex matmul (#29285)

Parent 6673fb05
......@@ -25,60 +25,60 @@ class TestComplexMatMulLayer(unittest.TestCase):
if fluid.core.is_compiled_with_cuda():
self._places.append(fluid.CUDAPlace(0))
def compare_by_complex_api(self, x, y):
    """Check paddle.complex.matmul against np.matmul on every place in self._places.

    x, y: numpy arrays (possibly complex) with matmul-compatible shapes.
    """
    # Reference result computed once, hoisted out of the place loop.
    np_result = np.matmul(x, y)
    for place in self._places:
        with dg.guard(place):
            x_var = dg.to_variable(x)
            y_var = dg.to_variable(y)
            result = paddle.complex.matmul(x_var, y_var)
            self.assertTrue(np.allclose(result.numpy(), np_result))
def compare_by_basic_api(self, x, y):
    """Check the basic paddle.matmul API against np.matmul on every place.

    x, y: numpy arrays (possibly complex) with matmul-compatible shapes.
    """
    # Reference result computed once, hoisted out of the place loop.
    np_result = np.matmul(x, y)
    for place in self._places:
        with dg.guard(place):
            # Build the VarBases directly on the place under test rather
            # than on the framework's current expected place.
            x_var = fluid.core.VarBase(
                value=x,
                place=place,
                persistable=False,
                zero_copy=None,
                name='')
            y_var = fluid.core.VarBase(
                value=y,
                place=place,
                persistable=False,
                zero_copy=None,
                name='')
            result = paddle.matmul(x_var, y_var)
            self.assertTrue(np.allclose(result.numpy(), np_result))
def compare_op_by_complex_api(self, x, y):
    """Check the Tensor.matmul operator (complex-api variables) against np.matmul.

    x, y: numpy arrays (possibly complex) with matmul-compatible shapes.
    """
    # Reference result computed once, hoisted out of the place loop.
    np_result = np.matmul(x, y)
    for place in self._places:
        with dg.guard(place):
            x_var = dg.to_variable(x)
            y_var = dg.to_variable(y)
            result = x_var.matmul(y_var)
            self.assertTrue(np.allclose(result.numpy(), np_result))
def compare_op_by_basic_api(self, x, y):
    """Check the Tensor.matmul operator (basic-api VarBase) against np.matmul.

    x, y: numpy arrays (possibly complex) with matmul-compatible shapes.
    """
    # Reference result computed once, hoisted out of the place loop.
    np_result = np.matmul(x, y)
    for place in self._places:
        with dg.guard(place):
            # Build the VarBases directly on the place under test rather
            # than on the framework's current expected place.
            x_var = fluid.core.VarBase(
                value=x,
                place=place,
                persistable=False,
                zero_copy=None,
                name='')
            y_var = fluid.core.VarBase(
                value=y,
                place=place,
                persistable=False,
                zero_copy=None,
                name='')
            result = x_var.matmul(y_var)
            self.assertTrue(np.allclose(result.numpy(), np_result))
def test_complex_xy(self):
    """complex64 x complex64, batched (2,3,4,5)@(2,3,5,4), all four compare paths."""
    # NOTE(review): the definition of `x` falls inside a diff-hunk gap in
    # the source being reviewed; reconstructed to mirror `y` and
    # test_complex_x — confirm against the full file.
    x = np.random.random(
        (2, 3, 4, 5)).astype("float32") + 1J * np.random.random(
            (2, 3, 4, 5)).astype("float32")
    y = np.random.random(
        (2, 3, 5, 4)).astype("float32") + 1J * np.random.random(
            (2, 3, 5, 4)).astype("float32")
    self.compare_by_complex_api(x, y)
    self.compare_op_by_complex_api(x, y)
    self.compare_by_basic_api(x, y)
    self.compare_op_by_basic_api(x, y)
def test_complex_x(self):
    """complex64 x real float32: only the complex-api paths support this mix here."""
    x = np.random.random(
        (2, 3, 4, 5)).astype("float32") + 1J * np.random.random(
            (2, 3, 4, 5)).astype("float32")
    y = np.random.random((2, 3, 5, 4)).astype("float32")
    self.compare_by_complex_api(x, y)
    self.compare_op_by_complex_api(x, y)
def test_complex_y(self):
    """Real float32 x complex64: checked via the complex api only."""
    x = np.random.random((2, 3, 4, 5)).astype("float32")
    y = np.random.random(
        (2, 3, 5, 4)).astype("float32") + 1J * np.random.random(
            (2, 3, 5, 4)).astype("float32")
    self.compare_by_complex_api(x, y)
def test_complex_xy_128(self):
    """complex128 x complex128, batched, via the basic paddle.matmul API."""
    x = np.random.random(
        (2, 3, 4, 5)).astype("float64") + 1J * np.random.random(
            (2, 3, 4, 5)).astype("float64")
    y = np.random.random(
        (2, 3, 5, 4)).astype("float64") + 1J * np.random.random(
            (2, 3, 5, 4)).astype("float64")
    self.compare_by_basic_api(x, y)
    self.compare_op_by_basic_api(x, y)
def test_complex_xy_gemv(self):
    """Matrix-vector (GEMV) shapes: (2,1,100) @ (100,), complex64 and complex128."""
    # NOTE(review): part of the `x` definition falls inside a diff-hunk gap;
    # reconstructed from the float64 variant below — confirm.
    x = np.random.random(
        (2, 1, 100)).astype("float32") + 1J * np.random.random(
            (2, 1, 100)).astype("float32")
    y = np.random.random((100)).astype("float32") + 1J * np.random.random(
        (100)).astype("float32")
    self.compare_by_basic_api(x, y)
    self.compare_op_by_basic_api(x, y)
    x = np.random.random(
        (2, 1, 100)).astype("float64") + 1J * np.random.random(
            (2, 1, 100)).astype("float64")
    y = np.random.random((100)).astype("float64") + 1J * np.random.random(
        (100)).astype("float64")
    # NOTE(review): the diff residue shows the old compare_1/compare_op_1
    # calls removed with no visible replacement; the renamed calls are kept
    # for consistency with the other tests — confirm against the full file.
    self.compare_by_basic_api(x, y)
    self.compare_op_by_basic_api(x, y)
def test_complex_xy_gemm(self):
    """Matrix-matrix (GEMM) shapes: (1,2,50) @ (1,50,2), complex64."""
    x = np.random.random(
        (1, 2, 50)).astype("float32") + 1J * np.random.random(
            (1, 2, 50)).astype("float32")
    y = np.random.random(
        (1, 50, 2)).astype("float32") + 1J * np.random.random(
            (1, 50, 2)).astype("float32")
    self.compare_by_basic_api(x, y)
    self.compare_op_by_basic_api(x, y)
def test_complex_xy_gemm_128(self):
    """Matrix-matrix (GEMM) shapes: (1,2,50) @ (1,50,2), complex128."""
    x = np.random.random(
        (1, 2, 50)).astype("float64") + 1J * np.random.random(
            (1, 2, 50)).astype("float64")
    y = np.random.random(
        (1, 50, 2)).astype("float64") + 1J * np.random.random(
            (1, 50, 2)).astype("float64")
    self.compare_by_basic_api(x, y)
    self.compare_op_by_basic_api(x, y)
class TestComplexMatMulLayerGEMM(unittest.TestCase):
    """Exercise paddle.matmul on complex GEMM-shaped inputs on every available place."""

    def setUp(self):
        # Always test on CPU; add the first CUDA device when available.
        self._places = [fluid.CPUPlace()]
        if fluid.core.is_compiled_with_cuda():
            self._places.append(fluid.CUDAPlace(0))

    def _to_varbase(self, data, place):
        # Wrap a numpy array as a VarBase living on `place`.
        return fluid.core.VarBase(
            value=data,
            place=place,
            persistable=False,
            zero_copy=None,
            name='')

    def compare_by_basic_api(self, x, y):
        """paddle.matmul(x, y) must match np.matmul(x, y) on every place."""
        expected = np.matmul(x, y)
        for place in self._places:
            with dg.guard(place):
                out = paddle.matmul(
                    self._to_varbase(x, place), self._to_varbase(y, place))
                self.assertTrue(np.allclose(out.numpy(), expected))

    def compare_op_by_basic_api(self, x, y):
        """x.matmul(y) (operator form) must match np.matmul(x, y) on every place."""
        expected = np.matmul(x, y)
        for place in self._places:
            with dg.guard(place):
                out = self._to_varbase(x, place).matmul(
                    self._to_varbase(y, place))
                self.assertTrue(np.allclose(out.numpy(), expected))

    def test_complex_xy_gemm_64(self):
        """complex64 GEMM shapes: (1,2,50) @ (1,50,2)."""
        x = np.random.random(
            (1, 2, 50)).astype("float32") + 1J * np.random.random(
                (1, 2, 50)).astype("float32")
        y = np.random.random(
            (1, 50, 2)).astype("float32") + 1J * np.random.random(
                (1, 50, 2)).astype("float32")
        self.compare_by_basic_api(x, y)
        self.compare_op_by_basic_api(x, y)
if __name__ == '__main__':
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register to comment.