From 51c3c66bac3d59976b7b41ad46d47ca8fac0e519 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E5=B0=8F=E9=A3=9E=E7=8C=AA?= <106524776+ooooo-create@users.noreply.github.com>
Date: Tue, 29 Aug 2023 10:38:53 +0800
Subject: [PATCH] [xdoctest][task 181-183] reformat example code with google
 style in `sparse/multiary.py`,`distributed/auto_parallel/*` (#56665)

* [Doctest]fix No.181-183, test=docs_preview

* add env skip
---
 .../distributed/auto_parallel/interface.py    | 34 ++++++++--------
 .../distributed/auto_parallel/process_mesh.py | 10 ++---
 python/paddle/sparse/multiary.py              | 39 ++++++++++---------
 3 files changed, 43 insertions(+), 40 deletions(-)

diff --git a/python/paddle/distributed/auto_parallel/interface.py b/python/paddle/distributed/auto_parallel/interface.py
index 81f0133d31b..ba8003dcd86 100644
--- a/python/paddle/distributed/auto_parallel/interface.py
+++ b/python/paddle/distributed/auto_parallel/interface.py
@@ -56,13 +56,14 @@ def shard_tensor(x, process_mesh=None, shard_spec=None):
     Examples:
         .. code-block:: python
 
-            import paddle
-            from paddle.distributed.fleet import auto
+            >>> # doctest: +REQUIRES(env:DISTRIBUTED)
+            >>> import paddle
+            >>> from paddle.distributed.fleet import auto
 
-            mesh = auto.ProcessMesh([[0, 1], [2, 3]], dim_names=["x", "y"])
-            x = paddle.ones([4, 6])
-            shard_spec = ["x", "y"]
-            auto.shard_tensor(x, mesh, shard_spec)
+            >>> mesh = auto.ProcessMesh([[0, 1], [2, 3]], dim_names=["x", "y"])
+            >>> x = paddle.ones([4, 6])
+            >>> shard_spec = ["x", "y"]
+            >>> auto.shard_tensor(x, mesh, shard_spec)
 
     """
 
@@ -145,16 +146,17 @@ def shard_op(op, process_mesh=None, in_shard_specs=None, out_shard_specs=None):
     Examples:
         .. code-block:: python
 
-            import paddle
-            from paddle.distributed.fleet import auto
-
-            x = paddle.ones([4, 6])
-            y = paddle.zeros([4, 6])
-            mesh = auto.ProcessMesh([[0, 1], [2, 3]], dim_names=["x", "y"])
-            dist_add = auto.shard_op(paddle.add,
-                                     in_shard_specs=[["x", "y"], ["y", None]],
-                                     out_shard_specs=[[None, "x"]])
-            dist_add(x, y)
+            >>> import paddle
+            >>> from paddle.distributed.fleet import auto
+
+            >>> x = paddle.ones([4, 6])
+            >>> y = paddle.zeros([4, 6])
+            >>> mesh = auto.ProcessMesh([[0, 1], [2, 3]], dim_names=["x", "y"])
+            >>> dist_add = auto.shard_op(paddle.add,
+            ...                          mesh,
+            ...                          in_shard_specs=[["x", "y"], ["y", None]],
+            ...                          out_shard_specs=[[None, "x"]])
+            >>> dist_add(x, y)
 
     """
 
diff --git a/python/paddle/distributed/auto_parallel/process_mesh.py b/python/paddle/distributed/auto_parallel/process_mesh.py
index a6ad3355d7d..3abc04f32f5 100644
--- a/python/paddle/distributed/auto_parallel/process_mesh.py
+++ b/python/paddle/distributed/auto_parallel/process_mesh.py
@@ -81,12 +81,12 @@ class ProcessMesh(core.ProcessMesh):
     Examples:
         .. code-block:: python
 
-            import paddle
-            import paddle.distributed as dist
+            >>> import paddle
+            >>> import paddle.distributed as dist
 
-            mesh = dist.ProcessMesh([[2, 4, 5], [0, 1, 3]], dim_names=["x", "y"])
-            assert mesh.shape == [2, 3]
-            assert mesh.process_ids == [2, 4, 5, 0, 1, 3]
+            >>> mesh = dist.ProcessMesh([[2, 4, 5], [0, 1, 3]], dim_names=["x", "y"])
+            >>> assert mesh.shape == [2, 3]
+            >>> assert mesh.process_ids == [2, 4, 5, 0, 1, 3]
 
     """
 
diff --git a/python/paddle/sparse/multiary.py b/python/paddle/sparse/multiary.py
index a09611d2d0f..8e3179f0bdc 100644
--- a/python/paddle/sparse/multiary.py
+++ b/python/paddle/sparse/multiary.py
@@ -58,25 +58,26 @@ def addmm(input, x, y, beta=1.0, alpha=1.0, name=None):
     Examples:
         .. code-block:: python
 
-            # required: gpu
-            import paddle
-
-            # dense + csr @ dense -> dense
-            input = paddle.rand([3, 2])
-            crows = [0, 1, 2, 3]
-            cols = [1, 2, 0]
-            values = [1., 2., 3.]
-            x = paddle.sparse.sparse_csr_tensor(crows, cols, values, [3, 3])
-            y = paddle.rand([3, 2])
-            out = paddle.sparse.addmm(input, x, y, 3.0, 2.0)
-
-            # dense + coo @ dense -> dense
-            input = paddle.rand([3, 2])
-            indices = [[0, 1, 2], [1, 2, 0]]
-            values = [1., 2., 3.]
-            x = paddle.sparse.sparse_coo_tensor(indices, values, [3, 3])
-            y = paddle.rand([3, 2])
-            out = paddle.sparse.addmm(input, x, y, 3.0, 2.0)
+            >>> # doctest: +REQUIRES(env:GPU)
+            >>> import paddle
+            >>> paddle.device.set_device('gpu')
+
+            >>> # dense + csr @ dense -> dense
+            >>> input = paddle.rand([3, 2])
+            >>> crows = [0, 1, 2, 3]
+            >>> cols = [1, 2, 0]
+            >>> values = [1., 2., 3.]
+            >>> x = paddle.sparse.sparse_csr_tensor(crows, cols, values, [3, 3])
+            >>> y = paddle.rand([3, 2])
+            >>> out = paddle.sparse.addmm(input, x, y, 3.0, 2.0)
+
+            >>> # dense + coo @ dense -> dense
+            >>> input = paddle.rand([3, 2])
+            >>> indices = [[0, 1, 2], [1, 2, 0]]
+            >>> values = [1., 2., 3.]
+            >>> x = paddle.sparse.sparse_coo_tensor(indices, values, [3, 3])
+            >>> y = paddle.rand([3, 2])
+            >>> out = paddle.sparse.addmm(input, x, y, 3.0, 2.0)
 
     """
     return _C_ops.sparse_addmm(input, x, y, beta, alpha)
-- 
GitLab