Unverified commit a57f0810, authored by zhiboniu, committed by GitHub

remove linalg api in paddle.__init__ (#36112)



remove recent linalg APIs from paddle.__init__;
add a `name` argument to some newly added linalg API interfaces
Parent 5f168af7
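For downstream code, the visible effect is a namespace move: the recently added routines are reached through `paddle.linalg` rather than the top-level `paddle` module. A minimal sketch of the migration (illustrative values; assumes a Paddle build that includes this commit):

```python
import paddle

x = paddle.to_tensor([[1., 0.], [0., 2.]])

# Old call sites (removed from paddle/__init__.py by this commit):
#   paddle.cond(x), paddle.det(x), paddle.multi_dot(...), paddle.matrix_power(x, 2), ...
# New call sites go through the paddle.linalg namespace:
print(paddle.linalg.cond(x))             # condition number (2-norm by default)
print(paddle.linalg.det(x))              # determinant
print(paddle.linalg.matrix_power(x, 2))  # integer matrix power
```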
@@ -94,20 +94,12 @@ from .tensor.linalg import dot  # noqa: F401
 from .tensor.linalg import norm  # noqa: F401
 from .tensor.linalg import transpose  # noqa: F401
 from .tensor.linalg import dist  # noqa: F401
-from .tensor.linalg import cond  # noqa: F401
 from .tensor.linalg import t  # noqa: F401
 from .tensor.linalg import cross  # noqa: F401
 from .tensor.linalg import cholesky  # noqa: F401
 from .tensor.linalg import bmm  # noqa: F401
 from .tensor.linalg import histogram  # noqa: F401
 from .tensor.linalg import mv  # noqa: F401
-from .tensor.linalg import det  # noqa: F401
-from .tensor.linalg import slogdet  # noqa: F401
-from .tensor.linalg import multi_dot  # noqa: F401
-from .tensor.linalg import matrix_power  # noqa: F401
-from .tensor.linalg import svd  # noqa: F401
-from .tensor.linalg import pinv  # noqa: F401
-from .tensor.linalg import solve  # noqa: F401
 from .tensor.logic import equal  # noqa: F401
 from .tensor.logic import greater_equal  # noqa: F401
 from .tensor.logic import greater_than  # noqa: F401
@@ -506,7 +498,6 @@ __all__ = [  # noqa
     'stack',
     'sqrt',
     'cholesky',
-    'matrix_power',
     'randperm',
     'linspace',
     'reshape',
......
@@ -28,7 +28,7 @@ def test_static_assert_true(self, x_list, p_list):
         for x in x_list:
             with static.program_guard(static.Program(), static.Program()):
                 input_data = static.data("X", shape=x.shape, dtype=x.dtype)
-                output = paddle.cond(input_data, p)
+                output = paddle.linalg.cond(input_data, p)
                 exe = static.Executor()
                 result = exe.run(feed={"X": x}, fetch_list=[output])
                 expected_output = np.linalg.cond(x, p)
@@ -39,7 +39,7 @@ def test_dygraph_assert_true(self, x_list, p_list):
     for p in p_list:
         for x in x_list:
             input_tensor = paddle.to_tensor(x)
-            output = paddle.cond(input_tensor, p)
+            output = paddle.linalg.cond(input_tensor, p)
             expected_output = np.linalg.cond(x, p)
             self.assertTrue(np.allclose(output, expected_output))
@@ -103,12 +103,12 @@ class TestCondAPIError(unittest.TestCase):
         for p in p_list_error:
             for x in (x_list_n_n + x_list_m_n):
                 x_tensor = paddle.to_tensor(x)
-                self.assertRaises(ValueError, paddle.cond, x_tensor, p)
+                self.assertRaises(ValueError, paddle.linalg.cond, x_tensor, p)

         for p in p_list_n_n:
             for x in x_list_m_n:
                 x_tensor = paddle.to_tensor(x)
-                self.assertRaises(ValueError, paddle.cond, x_tensor, p)
+                self.assertRaises(ValueError, paddle.linalg.cond, x_tensor, p)

     def test_static_api_error(self):
         paddle.enable_static()
@@ -119,13 +119,13 @@ class TestCondAPIError(unittest.TestCase):
             for x in (x_list_n_n + x_list_m_n):
                 with static.program_guard(static.Program(), static.Program()):
                     x_data = static.data("X", shape=x.shape, dtype=x.dtype)
-                    self.assertRaises(ValueError, paddle.cond, x_data, p)
+                    self.assertRaises(ValueError, paddle.linalg.cond, x_data, p)

         for p in p_list_n_n:
             for x in x_list_m_n:
                 with static.program_guard(static.Program(), static.Program()):
                     x_data = static.data("X", shape=x.shape, dtype=x.dtype)
-                    self.assertRaises(ValueError, paddle.cond, x_data, p)
+                    self.assertRaises(ValueError, paddle.linalg.cond, x_data, p)

     # it's not supported when input is an empty tensor in static mode
     def test_static_empty_input_error(self):
@@ -136,13 +136,13 @@ class TestCondAPIError(unittest.TestCase):
             for x in x_list_n_n:
                 with static.program_guard(static.Program(), static.Program()):
                     x_data = static.data("X", shape=x.shape, dtype=x.dtype)
-                    self.assertRaises(ValueError, paddle.cond, x_data, p)
+                    self.assertRaises(ValueError, paddle.linalg.cond, x_data, p)

         for p in (p_list_n_n + p_list_m_n):
             for x in x_list_n_n:
                 with static.program_guard(static.Program(), static.Program()):
                     x_data = static.data("X", shape=x.shape, dtype=x.dtype)
-                    self.assertRaises(ValueError, paddle.cond, x_data, p)
+                    self.assertRaises(ValueError, paddle.linalg.cond, x_data, p)


 class TestCondEmptyTensorInput(unittest.TestCase):
......
@@ -198,32 +198,34 @@ class TestMultiDotOpError(unittest.TestCase):
                                          paddle.static.Program()):
             # The inputs of multi_dot must be a list of matrices.
             input1 = 12
-            self.assertRaises(TypeError, paddle.multi_dot, [input1, input1])
+            self.assertRaises(TypeError, paddle.linalg.multi_dot,
+                              [input1, input1])

             # The inputs dtype of multi_dot must be float64, float32 or float16.
             input2 = paddle.static.data(
                 name='input2', shape=[10, 10], dtype="int32")
-            self.assertRaises(TypeError, paddle.multi_dot, [input2, input2])
+            self.assertRaises(TypeError, paddle.linalg.multi_dot,
+                              [input2, input2])

             # the number of tensors must be larger than 1
             x0 = paddle.static.data(name='x0', shape=[3, 2], dtype="float64")
-            self.assertRaises(ValueError, paddle.multi_dot, [x0])
+            self.assertRaises(ValueError, paddle.linalg.multi_dot, [x0])

             # the first tensor must be 1D or 2D
             x1 = paddle.static.data(name='x1', shape=[3, 2, 3], dtype="float64")
             x2 = paddle.static.data(name='x2', shape=[3, 2], dtype="float64")
-            self.assertRaises(ValueError, paddle.multi_dot, [x1, x2])
+            self.assertRaises(ValueError, paddle.linalg.multi_dot, [x1, x2])

             # the last tensor must be 1D or 2D
             x3 = paddle.static.data(name='x3', shape=[3, 2], dtype="float64")
             x4 = paddle.static.data(name='x4', shape=[3, 2, 2], dtype="float64")
-            self.assertRaises(ValueError, paddle.multi_dot, [x3, x4])
+            self.assertRaises(ValueError, paddle.linalg.multi_dot, [x3, x4])

             # each tensor must be 2D, except the first and the last
             x5 = paddle.static.data(name='x5', shape=[3, 2], dtype="float64")
             x6 = paddle.static.data(name='x6', shape=[2], dtype="float64")
             x7 = paddle.static.data(name='x7', shape=[2, 2], dtype="float64")
-            self.assertRaises(ValueError, paddle.multi_dot, [x5, x6, x7])
+            self.assertRaises(ValueError, paddle.linalg.multi_dot, [x5, x6, x7])


 class APITestMultiDot(unittest.TestCase):
@@ -232,7 +234,7 @@ class APITestMultiDot(unittest.TestCase):
         with paddle.static.program_guard(paddle.static.Program()):
             x0 = paddle.static.data(name='x0', shape=[3, 2], dtype="float64")
             x1 = paddle.static.data(name='x1', shape=[2, 3], dtype='float64')
-            result = paddle.multi_dot([x0, x1])
+            result = paddle.linalg.multi_dot([x0, x1])
             exe = paddle.static.Executor(paddle.CPUPlace())
             data1 = np.random.rand(3, 2).astype("float64")
             data2 = np.random.rand(2, 3).astype("float64")
@@ -254,7 +256,7 @@ class APITestMultiDot(unittest.TestCase):
         input_array2 = np.random.rand(4, 3).astype("float64")
         data1 = paddle.to_tensor(input_array1)
         data2 = paddle.to_tensor(input_array2)
-        out = paddle.multi_dot([data1, data2])
+        out = paddle.linalg.multi_dot([data1, data2])
         expected_result = np.linalg.multi_dot([input_array1, input_array2])
         self.assertTrue(np.allclose(expected_result, out.numpy()))
......
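For reference, the dygraph path exercised by APITestMultiDot above can be reproduced standalone. A small sketch checked against numpy's reference implementation (shapes chosen arbitrarily; assumes a build with this commit applied):

```python
import numpy as np
import paddle

a = np.random.rand(3, 4).astype("float64")
b = np.random.rand(4, 5).astype("float64")
c = np.random.rand(5, 2).astype("float64")

# multi_dot picks an efficient parenthesization for the chain product.
out = paddle.linalg.multi_dot(
    [paddle.to_tensor(a), paddle.to_tensor(b), paddle.to_tensor(c)])
assert np.allclose(out.numpy(), np.linalg.multi_dot([a, b, c]))
```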
@@ -448,7 +448,7 @@ def norm(x, p='fro', axis=None, keepdim=False, name=None):
                 format(axis))


-def dist(x, y, p=2):
+def dist(x, y, p=2, name=None):
     r"""
     This OP returns the p-norm of (x - y). It is not a norm in a strict sense, only as a measure
@@ -1251,7 +1251,7 @@ def bmm(x, y, name=None):
     return out


-def histogram(input, bins=100, min=0, max=0):
+def histogram(input, bins=100, min=0, max=0, name=None):
     """
     Computes the histogram of a tensor. The elements are sorted into equal width bins between min and max.
     If min and max are both zero, the minimum and maximum values of the data are used.
@@ -1351,7 +1351,7 @@ def mv(x, vec, name=None):
     return out


-def det(x):
+def det(x, name=None):
     """
     Calculates determinant value of a square matrix or batches of square matrices.
     Args:
@@ -1367,7 +1367,7 @@ def det(x):
            x = paddle.randn([3,3,3])

-            A = paddle.det(x)
+            A = paddle.linalg.det(x)

            print(A)
@@ -1399,7 +1399,7 @@ def det(x):
     return out


-def slogdet(x):
+def slogdet(x, name=None):
     """
     Calculates the sign and natural logarithm of the absolute value of a square matrix's or batches square matrices' determinant.
     The determinant can be computed with ``sign * exp(logabsdet)``.
@@ -1422,7 +1422,7 @@ def slogdet(x):
            x = paddle.randn([3,3,3])

-            A = paddle.slogdet(x)
+            A = paddle.linalg.slogdet(x)

            print(A)
@@ -1563,17 +1563,17 @@ def matrix_power(x, n, name=None):
             x = paddle.to_tensor([[1, 2, 3],
                                   [1, 4, 9],
                                   [1, 8, 27]], dtype='float64')
-            print(paddle.matrix_power(x, 2))
+            print(paddle.linalg.matrix_power(x, 2))
             # [[6.  , 34. , 102.],
             #  [14. , 90. , 282.],
             #  [36. , 250., 804.]]

-            print(paddle.matrix_power(x, 0))
+            print(paddle.linalg.matrix_power(x, 0))
             # [[1., 0., 0.],
             #  [0., 1., 0.],
             #  [0., 0., 1.]]

-            print(paddle.matrix_power(x, -2))
+            print(paddle.linalg.matrix_power(x, -2))
             # [[ 12.91666667, -12.75000000,  2.83333333 ],
             #  [-7.66666667 ,  8.         , -1.83333333 ],
             #  [ 1.80555556 , -1.91666667 ,  0.44444444 ]]
@@ -1699,7 +1699,7 @@ def multi_dot(x, name=None):
             B_data = np.random.random([4, 5]).astype(np.float32)
             A = paddle.to_tensor(A_data)
             B = paddle.to_tensor(B_data)
-            out = paddle.multi_dot([A, B])
+            out = paddle.linalg.multi_dot([A, B])
             print(out.numpy().shape)
             # [3, 5]
@@ -1710,7 +1710,7 @@ def multi_dot(x, name=None):
             A = paddle.to_tensor(A_data)
             B = paddle.to_tensor(B_data)
             C = paddle.to_tensor(C_data)
-            out = paddle.multi_dot([A, B, C])
+            out = paddle.linalg.multi_dot([A, B, C])
             print(out.numpy().shape)
             # [10, 7]
......
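The other half of the patch threads the conventional `name` argument through the new signatures (`dist`, `histogram`, `det`, `slogdet`). In Paddle, `name` is the usual optional hook for labelling an operator's output, mainly useful in static graphs. A hedged sketch of a call site (the label "det_out" is illustrative, not taken from the commit):

```python
import paddle

paddle.enable_static()
main_prog = paddle.static.Program()
startup_prog = paddle.static.Program()
with paddle.static.program_guard(main_prog, startup_prog):
    x = paddle.static.data("x", shape=[3, 3], dtype="float64")
    # The trailing `name` kwarg added by this commit follows Paddle's usual
    # convention for naming the output variable of the generated op.
    d = paddle.linalg.det(x, name="det_out")
print(d.name)  # the chosen label typically appears in the variable name
paddle.disable_static()
```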