From 5113aae6eabd4a3d34367b84cb6f6ccad19f0be8 Mon Sep 17 00:00:00 2001
From: ShenLiang <2282912238@qq.com>
Date: Thu, 13 Aug 2020 16:49:26 +0800
Subject: [PATCH] fix the doc of inverse, dot, cholesky (#25860)

* fix the doc of inverse, dot, cholesky
---
 .../fluid/tests/unittests/test_cholesky_op.py | 10 ++++
 .../fluid/tests/unittests/test_inverse_op.py  |  5 +-
 python/paddle/tensor/linalg.py                | 51 +++++++++----------
 python/paddle/tensor/math.py                  | 50 +++++++-----------
 4 files changed, 53 insertions(+), 63 deletions(-)

diff --git a/python/paddle/fluid/tests/unittests/test_cholesky_op.py b/python/paddle/fluid/tests/unittests/test_cholesky_op.py
index 4e2280c0118..f3e6c079eed 100644
--- a/python/paddle/fluid/tests/unittests/test_cholesky_op.py
+++ b/python/paddle/fluid/tests/unittests/test_cholesky_op.py
@@ -90,5 +90,15 @@ class TestCholeskyOp2D(TestCholeskyOp):
         self._input_shape = (64, 64)
 
 
+class TestDygraph(unittest.TestCase):
+    def test_dygraph(self):
+        paddle.disable_static()
+        a = np.random.rand(3, 3)
+        a_t = np.transpose(a, [1, 0])
+        x_data = np.matmul(a, a_t) + 1e-03
+        x = paddle.to_variable(x_data)
+        out = paddle.cholesky(x, upper=False)
+
+
 if __name__ == "__main__":
     unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/test_inverse_op.py b/python/paddle/fluid/tests/unittests/test_inverse_op.py
index 5349654ac27..fd540dcd741 100644
--- a/python/paddle/fluid/tests/unittests/test_inverse_op.py
+++ b/python/paddle/fluid/tests/unittests/test_inverse_op.py
@@ -89,8 +89,7 @@ class TestInverseAPI(unittest.TestCase):
     def check_static_result(self, place):
         with fluid.program_guard(fluid.Program(), fluid.Program()):
             input = fluid.data(name="input", shape=[4, 4], dtype="float64")
-            result = paddle.inverse(input=input)
-
+            result = paddle.inverse(x=input)
             input_np = np.random.random([4, 4]).astype("float64")
             result_np = np.linalg.inv(input_np)
 
@@ -145,7 +144,7 @@ class TestInverseSingularAPI(unittest.TestCase):
     def check_static_result(self, place):
         with fluid.program_guard(fluid.Program(), fluid.Program()):
             input = fluid.data(name="input", shape=[4, 4], dtype="float64")
-            result = paddle.inverse(input=input)
+            result = paddle.inverse(x=input)
 
             input_np = np.zeros([4, 4]).astype("float64")
 
diff --git a/python/paddle/tensor/linalg.py b/python/paddle/tensor/linalg.py
index 306e683f8ae..972c9fbce4d 100644
--- a/python/paddle/tensor/linalg.py
+++ b/python/paddle/tensor/linalg.py
@@ -452,21 +452,18 @@ def dist(x, y, p=2):
 
 def dot(x, y, name=None):
     """
-    :alias_main: paddle.dot
-    :alias: paddle.dot,paddle.tensor.dot,paddle.tensor.linalg.dot
-
     This operator calculates inner product for vectors.
 
     .. note::
       Only support 1-d Tensor(vector).
 
     Parameters:
-        x(Variable): 1-D ``Tensor`` or ``LoDTensor``. Its datatype should be ``float32``, ``float64``, ``int32``, ``int64``
-        y(Variable): 1-D ``Tensor`` or ``LoDTensor``. Its datatype soulde be ``float32``, ``float64``, ``int32``, ``int64``
+        x(Tensor): 1-D ``Tensor``. Its datatype should be ``float32``, ``float64``, ``int32``, ``int64``
+        y(Tensor): 1-D ``Tensor``. Its datatype should be ``float32``, ``float64``, ``int32``, ``int64``
         name(str, optional): Name of the output. Default is None. It's used to print debug info for developers. Details: :ref:`api_guide_Name`
 
     Returns:
-        Variable: the calculated result Tensor/LoDTensor.
+        Variable: the calculated result Tensor.
 
     Examples:
 
@@ -475,12 +472,14 @@ def dot(x, y, name=None):
             import paddle
             import paddle.fluid as fluid
             import numpy as np
-
-            with fluid.dygraph.guard():
-                x = fluid.dygraph.to_variable(np.random.uniform(0.1, 1, [10]).astype(np.float32))
-                y = fluid.dygraph.to_variable(np.random.uniform(1, 3, [10]).astype(np.float32))
-                z = paddle.dot(x, y)
-                print(z.numpy())
+
+            paddle.disable_static()
+            x_data = np.random.uniform(0.1, 1, [10]).astype(np.float32)
+            y_data = np.random.uniform(1, 3, [10]).astype(np.float32)
+            x = paddle.to_variable(x_data)
+            y = paddle.to_variable(y_data)
+            z = paddle.dot(x, y)
+            print(z.numpy())
 
     """
     op_type = 'dot'
@@ -651,11 +650,8 @@ def cross(x, y, axis=None, name=None):
     return out
 
 
-def cholesky(x, upper=False):
+def cholesky(x, upper=False, name=None):
     """
-    :alias_main: paddle.cholesky
-    :alias: paddle.cholesky,paddle.tensor.cholesky,paddle.tensor.linalg.cholesky
-
     Computes the Cholesky decomposition of one symmetric positive-definite
     matrix or batches of symmetric positive-definite matrice.
 
@@ -680,21 +676,22 @@ def cholesky(x, upper=False):
         .. code-block:: python
 
             import paddle
-            import paddle.fluid as fluid
             import numpy as np
 
-            with fluid.dygraph.guard():
-                a = np.random.rand(3, 3)
-                a_t = np.transpose(a, [1, 0])
-                x = np.matmul(a, a_t) + 1e-03
-                x = fluid.dygraph.to_variable(x)
-                out = paddle.cholesky(x, upper=False)
-                print(out.numpy())
-                # [[1.190523 0. 0. ]
-                #  [0.9906703 0.27676893 0. ]
-                #  [1.25450498 0.05600871 0.06400121]]
+            paddle.disable_static()
+            a = np.random.rand(3, 3)
+            a_t = np.transpose(a, [1, 0])
+            x_data = np.matmul(a, a_t) + 1e-03
+            x = paddle.to_variable(x_data)
+            out = paddle.cholesky(x, upper=False)
+            print(out.numpy())
+            # [[1.190523 0. 0. ]
+            #  [0.9906703 0.27676893 0. ]
+            #  [1.25450498 0.05600871 0.06400121]]
 
     """
+    if in_dygraph_mode():
+        return core.ops.cholesky(x, "upper", upper)
     check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'cholesky')
     check_type(upper, 'upper', bool, 'cholesky')
     helper = LayerHelper('cholesky', **locals())
diff --git a/python/paddle/tensor/math.py b/python/paddle/tensor/math.py
index edb488a3e59..8827a0dab39 100644
--- a/python/paddle/tensor/math.py
+++ b/python/paddle/tensor/math.py
@@ -1099,17 +1099,15 @@ def logsumexp(x, dim=None, keepdim=False, name=None):
     return layers.log(sum_out, name)
 
 
-def inverse(input, name=None):
-    """
-    :alias_main: paddle.inverse
-    :alias: paddle.inverse,paddle.tensor.inverse,paddle.tensor.math.inverse
+def inverse(x, name=None):
+    """
     Takes the inverse of the square matrix. A square matrix is a matrix with
     the same number of rows and columns. The input can be a square matrix
     (2-D Tensor) or batches of square matrices.
 
     Args:
-        input (Variable): The input Variable which holds a Tensor. The last two
+        x (Variable): The input tensor. The last two
             dimensions should be equal. When the number of dimensions is
             greater than 2, it is treated as batches of square matrix. The data
             type can be float32 and float64.
@@ -1118,52 +1116,38 @@ def inverse(input, name=None):
             please refer to :ref:`api_guide_Name`
 
     Returns:
-        Variable: A Tensor holds the inverse of input. The shape and data type
-        is the same as input.
+        Variable: A Tensor that holds the inverse of x. The shape and data type
+        are the same as those of x.
 
     Examples:
        .. code-block:: python
 
            import numpy as np
            import paddle
-           import paddle.fluid as fluid
 
            mat_np = np.array([[2, 0], [0, 2]]).astype("float32")
+           paddle.disable_static()
+           mat = paddle.to_variable(mat_np)
+           inv = paddle.inverse(mat)
+           print(inv) # [[0.5, 0], [0, 0.5]]
 
-           # example for static graph
-           input = fluid.data("input", shape=[2, 2], dtype="float32")
-           out = paddle.inverse(input)
-
-           place = fluid.CPUPlace()
-           exe = fluid.Executor(place)
-           results = exe.run(feed={"input": mat_np },
-                             fetch_list=[out.name])
-           print(results[0]) # [[0.5, 0], [0, 0.5]]
-
-           # example for dynamic graph
-           with fluid.dygraph.guard():
-               mat = fluid.dygraph.to_variable(mat_np)
-               inv = paddle.inverse(mat)
-               print(inv) # [[0.5, 0], [0, 0.5]]
 
     """
     if in_dygraph_mode():
-        return core.ops.inverse(input)
+        return core.ops.inverse(x)
 
-    def _check_input(input):
-        check_variable_and_dtype(input, 'input',
+    def _check_input(x):
+        check_variable_and_dtype(x, 'x',
                                  ['float32', 'float64'], 'inverse')
-        if len(input.shape) < 2:
+        if len(x.shape) < 2:
             raise ValueError(
                 "The input of inverse is expected to be a Tensor whose number "
-                "of dimensions is no less than 2. But reviced: %d, "
-                "input's shape: %s." % (len(input.shape), input.shape))
-
-    _check_input(input)
-
+                "of dimensions is no less than 2. But received: %d, "
+                "x's shape: %s." % (len(x.shape), x.shape))
+    _check_input(x)
     helper = LayerHelper('inverse', **locals())
-    out = helper.create_variable_for_type_inference(dtype=input.dtype)
+    out = helper.create_variable_for_type_inference(dtype=x.dtype)
     helper.append_op(
-        type='inverse', inputs={'Input': [input] }, outputs={'Output': [out]})
+        type='inverse', inputs={'Input': [x] }, outputs={'Output': [out]})
     return out
--
GitLab
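
Reviewer note (not part of the patch): the snippet below is a small, self-contained sanity check of the three dygraph code paths this change documents. It is only a sketch and assumes a Paddle build contemporary with this patch, where paddle.disable_static() and paddle.to_variable() are available (later releases use paddle.to_tensor() instead); it simply mirrors the updated docstring examples.

    # Quick manual check of the dygraph APIs touched by this patch.
    # Assumption: Paddle build from around the time of this change.
    import numpy as np
    import paddle

    paddle.disable_static()

    # paddle.dot: inner product of two 1-D tensors.
    x = paddle.to_variable(np.random.uniform(0.1, 1, [10]).astype(np.float32))
    y = paddle.to_variable(np.random.uniform(1, 3, [10]).astype(np.float32))
    print(paddle.dot(x, y).numpy())

    # paddle.cholesky: lower-triangular factor L of a symmetric
    # positive-definite matrix, so L @ L.T reproduces the input.
    a = np.random.rand(3, 3)
    spd = np.matmul(a, a.T) + 1e-03
    chol = paddle.cholesky(paddle.to_variable(spd), upper=False)
    print(np.allclose(np.matmul(chol.numpy(), chol.numpy().T), spd, atol=1e-5))

    # paddle.inverse: the argument is now named `x` rather than `input`.
    mat = paddle.to_variable(np.array([[2.0, 0.0], [0.0, 2.0]], dtype="float32"))
    print(paddle.inverse(x=mat).numpy())  # expected: [[0.5, 0.], [0., 0.5]]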