未验证 提交 5113aae6 编写于 作者: S ShenLiang 提交者: GitHub

fix the doc of inverse, dot, cholesky (#25860)

* fix the doc of inverse, dot, cholesky
上级 cfa086bd
...@@ -90,5 +90,15 @@ class TestCholeskyOp2D(TestCholeskyOp): ...@@ -90,5 +90,15 @@ class TestCholeskyOp2D(TestCholeskyOp):
self._input_shape = (64, 64) self._input_shape = (64, 64)
class TestDygraph(unittest.TestCase):
    """Smoke test: paddle.cholesky runs in dygraph (imperative) mode."""

    def test_dygraph(self):
        paddle.disable_static()
        # Construct a symmetric positive-definite input: A @ A.T is
        # symmetric PSD; the small additive constant nudges it to be
        # strictly positive-definite so the decomposition is well defined.
        base = np.random.rand(3, 3)
        spd_matrix = np.matmul(base, base.T) + 1e-03
        tensor = paddle.to_variable(spd_matrix)
        # Lower-triangular factor; no assertion — the test only checks
        # that the op executes without raising under dygraph.
        result = paddle.cholesky(tensor, upper=False)
if __name__ == "__main__": if __name__ == "__main__":
unittest.main() unittest.main()
...@@ -89,8 +89,7 @@ class TestInverseAPI(unittest.TestCase): ...@@ -89,8 +89,7 @@ class TestInverseAPI(unittest.TestCase):
def check_static_result(self, place): def check_static_result(self, place):
with fluid.program_guard(fluid.Program(), fluid.Program()): with fluid.program_guard(fluid.Program(), fluid.Program()):
input = fluid.data(name="input", shape=[4, 4], dtype="float64") input = fluid.data(name="input", shape=[4, 4], dtype="float64")
result = paddle.inverse(input=input) result = paddle.inverse(x=input)
input_np = np.random.random([4, 4]).astype("float64") input_np = np.random.random([4, 4]).astype("float64")
result_np = np.linalg.inv(input_np) result_np = np.linalg.inv(input_np)
...@@ -145,7 +144,7 @@ class TestInverseSingularAPI(unittest.TestCase): ...@@ -145,7 +144,7 @@ class TestInverseSingularAPI(unittest.TestCase):
def check_static_result(self, place): def check_static_result(self, place):
with fluid.program_guard(fluid.Program(), fluid.Program()): with fluid.program_guard(fluid.Program(), fluid.Program()):
input = fluid.data(name="input", shape=[4, 4], dtype="float64") input = fluid.data(name="input", shape=[4, 4], dtype="float64")
result = paddle.inverse(input=input) result = paddle.inverse(x=input)
input_np = np.zeros([4, 4]).astype("float64") input_np = np.zeros([4, 4]).astype("float64")
......
...@@ -452,21 +452,18 @@ def dist(x, y, p=2): ...@@ -452,21 +452,18 @@ def dist(x, y, p=2):
def dot(x, y, name=None): def dot(x, y, name=None):
""" """
:alias_main: paddle.dot
:alias: paddle.dot,paddle.tensor.dot,paddle.tensor.linalg.dot
This operator calculates inner product for vectors. This operator calculates inner product for vectors.
.. note:: .. note::
Only support 1-d Tensor(vector). Only support 1-d Tensor(vector).
Parameters: Parameters:
x(Variable): 1-D ``Tensor`` or ``LoDTensor``. Its datatype should be ``float32``, ``float64``, ``int32``, ``int64`` x(Tensor): 1-D ``Tensor``. Its datatype should be ``float32``, ``float64``, ``int32``, ``int64``
y(Variable): 1-D ``Tensor`` or ``LoDTensor``. Its datatype should be ``float32``, ``float64``, ``int32``, ``int64`` y(Tensor): 1-D ``Tensor``. Its datatype should be ``float32``, ``float64``, ``int32``, ``int64``
name(str, optional): Name of the output. Default is None. It's used to print debug info for developers. Details: :ref:`api_guide_Name` name(str, optional): Name of the output. Default is None. It's used to print debug info for developers. Details: :ref:`api_guide_Name`
Returns: Returns:
Variable: the calculated result Tensor/LoDTensor. Variable: the calculated result Tensor.
Examples: Examples:
...@@ -476,9 +473,11 @@ def dot(x, y, name=None): ...@@ -476,9 +473,11 @@ def dot(x, y, name=None):
import paddle.fluid as fluid import paddle.fluid as fluid
import numpy as np import numpy as np
with fluid.dygraph.guard(): paddle.disable_static()
x = fluid.dygraph.to_variable(np.random.uniform(0.1, 1, [10]).astype(np.float32)) x_data = np.random.uniform(0.1, 1, [10]).astype(np.float32)
y = fluid.dygraph.to_variable(np.random.uniform(1, 3, [10]).astype(np.float32)) y_data = np.random.uniform(1, 3, [10]).astype(np.float32)
x = paddle.to_variable(x_data)
y = paddle.to_variable(y_data)
z = paddle.dot(x, y) z = paddle.dot(x, y)
print(z.numpy()) print(z.numpy())
...@@ -651,11 +650,8 @@ def cross(x, y, axis=None, name=None): ...@@ -651,11 +650,8 @@ def cross(x, y, axis=None, name=None):
return out return out
def cholesky(x, upper=False): def cholesky(x, upper=False, name=None):
""" """
:alias_main: paddle.cholesky
:alias: paddle.cholesky,paddle.tensor.cholesky,paddle.tensor.linalg.cholesky
Computes the Cholesky decomposition of one symmetric positive-definite Computes the Cholesky decomposition of one symmetric positive-definite
matrix or batches of symmetric positive-definite matrices. matrix or batches of symmetric positive-definite matrices.
...@@ -680,14 +676,13 @@ def cholesky(x, upper=False): ...@@ -680,14 +676,13 @@ def cholesky(x, upper=False):
.. code-block:: python .. code-block:: python
import paddle import paddle
import paddle.fluid as fluid
import numpy as np import numpy as np
with fluid.dygraph.guard(): paddle.disable_static()
a = np.random.rand(3, 3) a = np.random.rand(3, 3)
a_t = np.transpose(a, [1, 0]) a_t = np.transpose(a, [1, 0])
x = np.matmul(a, a_t) + 1e-03 x_data = np.matmul(a, a_t) + 1e-03
x = fluid.dygraph.to_variable(x) x = paddle.to_variable(x_data)
out = paddle.cholesky(x, upper=False) out = paddle.cholesky(x, upper=False)
print(out.numpy()) print(out.numpy())
# [[1.190523 0. 0. ] # [[1.190523 0. 0. ]
...@@ -695,6 +690,8 @@ def cholesky(x, upper=False): ...@@ -695,6 +690,8 @@ def cholesky(x, upper=False):
# [1.25450498 0.05600871 0.06400121]] # [1.25450498 0.05600871 0.06400121]]
""" """
if in_dygraph_mode():
return core.ops.cholesky(x, "upper", upper)
check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'cholesky') check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'cholesky')
check_type(upper, 'upper', bool, 'cholesky') check_type(upper, 'upper', bool, 'cholesky')
helper = LayerHelper('cholesky', **locals()) helper = LayerHelper('cholesky', **locals())
......
...@@ -1099,17 +1099,15 @@ def logsumexp(x, dim=None, keepdim=False, name=None): ...@@ -1099,17 +1099,15 @@ def logsumexp(x, dim=None, keepdim=False, name=None):
return layers.log(sum_out, name) return layers.log(sum_out, name)
def inverse(input, name=None):
"""
:alias_main: paddle.inverse
:alias: paddle.inverse,paddle.tensor.inverse,paddle.tensor.math.inverse
def inverse(x, name=None):
"""
Takes the inverse of the square matrix. A square matrix is a matrix with Takes the inverse of the square matrix. A square matrix is a matrix with
the same number of rows and columns. The input can be a square matrix the same number of rows and columns. The input can be a square matrix
(2-D Tensor) or batches of square matrices. (2-D Tensor) or batches of square matrices.
Args: Args:
input (Variable): The input Variable which holds a Tensor. The last two x (Variable): The input tensor. The last two
dimensions should be equal. When the number of dimensions is dimensions should be equal. When the number of dimensions is
greater than 2, it is treated as batches of square matrix. The data greater than 2, it is treated as batches of square matrix. The data
type can be float32 and float64. type can be float32 and float64.
...@@ -1118,52 +1116,38 @@ def inverse(input, name=None): ...@@ -1118,52 +1116,38 @@ def inverse(input, name=None):
please refer to :ref:`api_guide_Name` please refer to :ref:`api_guide_Name`
Returns: Returns:
Variable: A Tensor holds the inverse of input. The shape and data type Variable: A Tensor holds the inverse of x. The shape and data type
is the same as input. is the same as x.
Examples: Examples:
.. code-block:: python .. code-block:: python
import numpy as np import numpy as np
import paddle import paddle
import paddle.fluid as fluid
mat_np = np.array([[2, 0], [0, 2]]).astype("float32") mat_np = np.array([[2, 0], [0, 2]]).astype("float32")
paddle.disable_static()
# example for static graph mat = paddle.to_variable(mat_np)
input = fluid.data("input", shape=[2, 2], dtype="float32")
out = paddle.inverse(input)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
results = exe.run(feed={"input": mat_np },
fetch_list=[out.name])
print(results[0]) # [[0.5, 0], [0, 0.5]]
# example for dynamic graph
with fluid.dygraph.guard():
mat = fluid.dygraph.to_variable(mat_np)
inv = paddle.inverse(mat) inv = paddle.inverse(mat)
print(inv) # [[0.5, 0], [0, 0.5]] print(inv) # [[0.5, 0], [0, 0.5]]
""" """
if in_dygraph_mode(): if in_dygraph_mode():
return core.ops.inverse(input) return core.ops.inverse(x)
def _check_input(input): def _check_input(x):
check_variable_and_dtype(input, 'input', check_variable_and_dtype(x, 'x',
['float32', 'float64'], 'inverse') ['float32', 'float64'], 'inverse')
if len(input.shape) < 2: if len(x.shape) < 2:
raise ValueError( raise ValueError(
"The input of inverse is expected to be a Tensor whose number " "The input of inverse is expected to be a Tensor whose number "
"of dimensions is no less than 2. But received: %d, " "of dimensions is no less than 2. But received: %d, "
"input's shape: %s." % (len(input.shape), input.shape)) "x's shape: %s." % (len(x.shape), x.shape))
_check_input(x)
_check_input(input)
helper = LayerHelper('inverse', **locals()) helper = LayerHelper('inverse', **locals())
out = helper.create_variable_for_type_inference(dtype=input.dtype) out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op( helper.append_op(
type='inverse', inputs={'Input': [input] }, outputs={'Output': [out]}) type='inverse', inputs={'Input': [x] }, outputs={'Output': [out]})
return out return out
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册