Unverified commit 3bb4715e, authored by zhiboniu, committed by GitHub

remove new linalg api in paddle.__init__ (#36151)

Remove the recently added linalg APIs from paddle.__init__;
add a 'name' argument to some of the new linalg API interfaces.
Same change as #36112, applied to the develop branch.
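For reference, the user-visible effect is that these APIs are now reached through the paddle.linalg namespace rather than the top-level paddle namespace. A minimal dygraph sketch; cond, det, slogdet, and matrix_power are confirmed by the diff below, while svd and solve are assumed to live under paddle.linalg as well, per the commit title:

    import paddle

    x = paddle.randn([3, 3])
    b = paddle.randn([3, 1])
    c = paddle.linalg.cond(x)              # was paddle.cond(x)
    d = paddle.linalg.det(x)               # was paddle.det(x)
    s = paddle.linalg.slogdet(x)           # was paddle.slogdet(x)
    m = paddle.linalg.matrix_power(x, 2)   # was paddle.matrix_power(x, 2)
    u, sig, vt = paddle.linalg.svd(x)      # was paddle.svd(x)
    y = paddle.linalg.solve(x, b)          # was paddle.solve(x, b)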
上级 af4f018a
@@ -94,18 +94,12 @@ from .tensor.linalg import dot  # noqa: F401
 from .tensor.linalg import norm  # noqa: F401
 from .tensor.linalg import transpose  # noqa: F401
 from .tensor.linalg import dist  # noqa: F401
-from .tensor.linalg import cond  # noqa: F401
 from .tensor.linalg import t  # noqa: F401
 from .tensor.linalg import cross  # noqa: F401
 from .tensor.linalg import cholesky  # noqa: F401
 from .tensor.linalg import bmm  # noqa: F401
 from .tensor.linalg import histogram  # noqa: F401
 from .tensor.linalg import mv  # noqa: F401
-from .tensor.linalg import det  # noqa: F401
-from .tensor.linalg import slogdet  # noqa: F401
-from .tensor.linalg import matrix_power  # noqa: F401
-from .tensor.linalg import svd  # noqa: F401
-from .tensor.linalg import solve  # noqa: F401
 from .tensor.logic import equal  # noqa: F401
 from .tensor.logic import greater_equal  # noqa: F401
 from .tensor.logic import greater_than  # noqa: F401
@@ -504,7 +498,6 @@ __all__ = [  # noqa
     'stack',
     'sqrt',
     'cholesky',
-    'matrix_power',
     'randperm',
     'linspace',
     'reshape',
...
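With the re-exports above removed, the affected names should no longer be attributes of the top-level package. A quick sanity check, assuming no other re-export path keeps them alive:

    import paddle

    assert hasattr(paddle.linalg, 'matrix_power')
    assert not hasattr(paddle, 'matrix_power')  # removed from paddle.__init__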
@@ -28,7 +28,7 @@ def test_static_assert_true(self, x_list, p_list):
         for x in x_list:
             with static.program_guard(static.Program(), static.Program()):
                 input_data = static.data("X", shape=x.shape, dtype=x.dtype)
-                output = paddle.cond(input_data, p)
+                output = paddle.linalg.cond(input_data, p)
                 exe = static.Executor()
                 result = exe.run(feed={"X": x}, fetch_list=[output])
                 expected_output = np.linalg.cond(x, p)
@@ -39,7 +39,7 @@ def test_dygraph_assert_true(self, x_list, p_list):
     for p in p_list:
         for x in x_list:
             input_tensor = paddle.to_tensor(x)
-            output = paddle.cond(input_tensor, p)
+            output = paddle.linalg.cond(input_tensor, p)
             expected_output = np.linalg.cond(x, p)
             self.assertTrue(np.allclose(output, expected_output))
 
@@ -103,12 +103,12 @@ class TestCondAPIError(unittest.TestCase):
         for p in p_list_error:
             for x in (x_list_n_n + x_list_m_n):
                 x_tensor = paddle.to_tensor(x)
-                self.assertRaises(ValueError, paddle.cond, x_tensor, p)
+                self.assertRaises(ValueError, paddle.linalg.cond, x_tensor, p)
 
         for p in p_list_n_n:
             for x in x_list_m_n:
                 x_tensor = paddle.to_tensor(x)
-                self.assertRaises(ValueError, paddle.cond, x_tensor, p)
+                self.assertRaises(ValueError, paddle.linalg.cond, x_tensor, p)
 
     def test_static_api_error(self):
         paddle.enable_static()
@@ -119,13 +119,13 @@ class TestCondAPIError(unittest.TestCase):
             for x in (x_list_n_n + x_list_m_n):
                 with static.program_guard(static.Program(), static.Program()):
                     x_data = static.data("X", shape=x.shape, dtype=x.dtype)
-                    self.assertRaises(ValueError, paddle.cond, x_data, p)
+                    self.assertRaises(ValueError, paddle.linalg.cond, x_data, p)
 
         for p in p_list_n_n:
             for x in x_list_m_n:
                 with static.program_guard(static.Program(), static.Program()):
                     x_data = static.data("X", shape=x.shape, dtype=x.dtype)
-                    self.assertRaises(ValueError, paddle.cond, x_data, p)
+                    self.assertRaises(ValueError, paddle.linalg.cond, x_data, p)
 
     # it's not supported when input is an empty tensor in static mode
     def test_static_empty_input_error(self):
@@ -136,13 +136,13 @@ class TestCondAPIError(unittest.TestCase):
             for x in x_list_n_n:
                 with static.program_guard(static.Program(), static.Program()):
                     x_data = static.data("X", shape=x.shape, dtype=x.dtype)
-                    self.assertRaises(ValueError, paddle.cond, x_data, p)
+                    self.assertRaises(ValueError, paddle.linalg.cond, x_data, p)
 
         for p in (p_list_n_n + p_list_m_n):
             for x in x_list_n_n:
                 with static.program_guard(static.Program(), static.Program()):
                     x_data = static.data("X", shape=x.shape, dtype=x.dtype)
-                    self.assertRaises(ValueError, paddle.cond, x_data, p)
+                    self.assertRaises(ValueError, paddle.linalg.cond, x_data, p)
 
 
 class TestCondEmptyTensorInput(unittest.TestCase):
...
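The updated tests compare paddle.linalg.cond against np.linalg.cond in both dygraph and static modes. A condensed dygraph version of that check, using a hypothetical diagonal input chosen so the expected value is easy to verify by hand:

    import numpy as np
    import paddle

    x = np.diag([1.0, 2.0, 4.0]).astype('float32')
    out = paddle.linalg.cond(paddle.to_tensor(x), p=2)
    # the 2-norm condition number of diag(1, 2, 4) is
    # max singular value / min singular value = 4
    assert np.allclose(out.numpy(), np.linalg.cond(x, 2))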
@@ -448,7 +448,7 @@ def norm(x, p='fro', axis=None, keepdim=False, name=None):
             format(axis))
 
 
-def dist(x, y, p=2):
+def dist(x, y, p=2, name=None):
     r"""
 
     This OP returns the p-norm of (x - y). It is not a norm in a strict sense, only as a measure
@@ -1251,7 +1251,7 @@ def bmm(x, y, name=None):
     return out
 
 
-def histogram(input, bins=100, min=0, max=0):
+def histogram(input, bins=100, min=0, max=0, name=None):
     """
     Computes the histogram of a tensor. The elements are sorted into equal width bins between min and max.
     If min and max are both zero, the minimum and maximum values of the data are used.
@@ -1351,7 +1351,7 @@ def mv(x, vec, name=None):
     return out
 
 
-def det(x):
+def det(x, name=None):
     """
     Calculates determinant value of a square matrix or batches of square matrices.
     Args:
@@ -1367,7 +1367,7 @@ def det(x):
 
             x = paddle.randn([3,3,3])
 
-            A = paddle.det(x)
+            A = paddle.linalg.det(x)
 
             print(A)
 
@@ -1399,7 +1399,7 @@ def det(x):
     return out
 
 
-def slogdet(x):
+def slogdet(x, name=None):
     """
     Calculates the sign and natural logarithm of the absolute value of a square matrix's or batches square matrices' determinant.
     The determinant can be computed with ``sign * exp(logabsdet)
@@ -1422,7 +1422,7 @@ def slogdet(x):
 
             x = paddle.randn([3,3,3])
 
-            A = paddle.slogdet(x)
+            A = paddle.linalg.slogdet(x)
 
             print(A)
 
@@ -1563,17 +1563,17 @@ def matrix_power(x, n, name=None):
             x = paddle.to_tensor([[1, 2, 3],
                                   [1, 4, 9],
                                   [1, 8, 27]], dtype='float64')
-            print(paddle.matrix_power(x, 2))
+            print(paddle.linalg.matrix_power(x, 2))
             # [[6. , 34. , 102.],
             #  [14. , 90. , 282.],
             #  [36. , 250., 804.]]
 
-            print(paddle.matrix_power(x, 0))
+            print(paddle.linalg.matrix_power(x, 0))
             # [[1., 0., 0.],
             #  [0., 1., 0.],
             #  [0., 0., 1.]]
 
-            print(paddle.matrix_power(x, -2))
+            print(paddle.linalg.matrix_power(x, -2))
             # [[ 12.91666667, -12.75000000, 2.83333333 ],
             #  [-7.66666667 , 8. , -1.83333333 ],
             #  [ 1.80555556 , -1.91666667 , 0.44444444 ]]
...
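The signature changes above only add the conventional optional `name` argument; behavior with the default None is unchanged. A sketch of passing it explicitly in static-graph mode, assuming the usual Paddle semantics where `name` prefixes the created output variable (the names 'det_out' and 'hist_out' are hypothetical):

    import paddle
    import paddle.static as static

    paddle.enable_static()
    with static.program_guard(static.Program(), static.Program()):
        x = static.data("X", shape=[3, 3], dtype='float32')
        d = paddle.linalg.det(x, name='det_out')                 # new optional arg
        h = paddle.histogram(x, bins=10, min=-1, max=1, name='hist_out')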