Unverified commit 51c3c66b, authored by 小飞猪, committed by GitHub


[xdoctest][task 181-183] reformat example code with google style in `sparse/multiary.py`,`distributed/auto_parallel/*` (#56665)

* [Doctest]fix No.181-183, test=docs_preview

* add env skip
Parent 11421705
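For context, the "google style" named in the title is the doctest format that xdoctest collects and runs: every statement carries a `>>>` prompt, continuation lines use `...`, and expected output follows unprefixed. Plain code blocks without prompts are only rendered in the docs; the prompt form lets CI execute them. A minimal sketch of the target format, using a hypothetical `double` function rather than anything from this PR:

    def double(x):
        """Return ``x`` doubled.

        Examples:
            .. code-block:: python

                >>> double(21)
                42
        """
        return x * 2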
@@ -56,13 +56,14 @@ def shard_tensor(x, process_mesh=None, shard_spec=None):
     Examples:
         .. code-block:: python

-            import paddle
-            from paddle.distributed.fleet import auto
+            >>> # doctest: +REQUIRES(env:DISTRIBUTED)
+            >>> import paddle
+            >>> from paddle.distributed.fleet import auto

-            mesh = auto.ProcessMesh([[0, 1], [2, 3]], dim_names=["x", "y"])
-            x = paddle.ones([4, 6])
-            shard_spec = ["x", "y"]
-            auto.shard_tensor(x, mesh, shard_spec)
+            >>> mesh = auto.ProcessMesh([[0, 1], [2, 3]], dim_names=["x", "y"])
+            >>> x = paddle.ones([4, 6])
+            >>> shard_spec = ["x", "y"]
+            >>> auto.shard_tensor(x, mesh, shard_spec)
     """
@@ -145,16 +146,17 @@ def shard_op(op, process_mesh=None, in_shard_specs=None, out_shard_specs=None):
     Examples:
         .. code-block:: python

-            import paddle
-            from paddle.distributed.fleet import auto
-            x = paddle.ones([4, 6])
-            y = paddle.zeros([4, 6])
-            mesh = auto.ProcessMesh([[0, 1], [2, 3]], dim_names=["x", "y"])
-            dist_add = auto.shard_op(paddle.add,
-                                     in_shard_specs=[["x", "y"], ["y", None]],
-                                     out_shard_specs=[[None, "x"]])
-            dist_add(x, y)
+            >>> import paddle
+            >>> from paddle.distributed.fleet import auto
+            >>> x = paddle.ones([4, 6])
+            >>> y = paddle.zeros([4, 6])
+            >>> mesh = auto.ProcessMesh([[0, 1], [2, 3]], dim_names=["x", "y"])
+            >>> dist_add = auto.shard_op(paddle.add,
+            ...                          mesh,
+            ...                          in_shard_specs=[["x", "y"], ["y", None]],
+            ...                          out_shard_specs=[[None, "x"]])
+            >>> dist_add(x, y)
     """
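This hunk is more than a mechanical prompt conversion: the old example never passed a mesh to `auto.shard_op`, leaving `process_mesh` at its `None` default per the signature in the hunk header. The new example binds `mesh` positionally; spelled out with keywords, the corrected call is equivalent to:

    dist_add = auto.shard_op(paddle.add,
                             process_mesh=mesh,
                             in_shard_specs=[["x", "y"], ["y", None]],
                             out_shard_specs=[[None, "x"]])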
@@ -81,12 +81,12 @@ class ProcessMesh(core.ProcessMesh):
     Examples:
         .. code-block:: python

-            import paddle
-            import paddle.distributed as dist
+            >>> import paddle
+            >>> import paddle.distributed as dist

-            mesh = dist.ProcessMesh([[2, 4, 5], [0, 1, 3]], dim_names=["x", "y"])
-            assert mesh.shape == [2, 3]
-            assert mesh.process_ids == [2, 4, 5, 0, 1, 3]
+            >>> mesh = dist.ProcessMesh([[2, 4, 5], [0, 1, 3]], dim_names=["x", "y"])
+            >>> assert mesh.shape == [2, 3]
+            >>> assert mesh.process_ids == [2, 4, 5, 0, 1, 3]
     """
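The two asserts in the new example state what `ProcessMesh` derives from its nested-list topology: `shape` holds the per-dimension extents and `process_ids` is the row-major flattening. The same invariants in plain Python, with no paddle dependency:

    mesh_topology = [[2, 4, 5], [0, 1, 3]]
    shape = [len(mesh_topology), len(mesh_topology[0])]      # [2, 3]
    process_ids = [p for row in mesh_topology for p in row]  # [2, 4, 5, 0, 1, 3]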
@@ -58,25 +58,26 @@ def addmm(input, x, y, beta=1.0, alpha=1.0, name=None):
         .. code-block:: python

-            # required: gpu
-            import paddle
-            # dense + csr @ dense -> dense
-            input = paddle.rand([3, 2])
-            crows = [0, 1, 2, 3]
-            cols = [1, 2, 0]
-            values = [1., 2., 3.]
-            x = paddle.sparse.sparse_csr_tensor(crows, cols, values, [3, 3])
-            y = paddle.rand([3, 2])
-            out = paddle.sparse.addmm(input, x, y, 3.0, 2.0)
-            # dense + coo @ dense -> dense
-            input = paddle.rand([3, 2])
-            indices = [[0, 1, 2], [1, 2, 0]]
-            values = [1., 2., 3.]
-            x = paddle.sparse.sparse_coo_tensor(indices, values, [3, 3])
-            y = paddle.rand([3, 2])
-            out = paddle.sparse.addmm(input, x, y, 3.0, 2.0)
+            >>> # doctest: +REQUIRES(env:GPU)
+            >>> import paddle
+            >>> paddle.device.set_device('gpu')
+            >>> # dense + csr @ dense -> dense
+            >>> input = paddle.rand([3, 2])
+            >>> crows = [0, 1, 2, 3]
+            >>> cols = [1, 2, 0]
+            >>> values = [1., 2., 3.]
+            >>> x = paddle.sparse.sparse_csr_tensor(crows, cols, values, [3, 3])
+            >>> y = paddle.rand([3, 2])
+            >>> out = paddle.sparse.addmm(input, x, y, 3.0, 2.0)
+            >>> # dense + coo @ dense -> dense
+            >>> input = paddle.rand([3, 2])
+            >>> indices = [[0, 1, 2], [1, 2, 0]]
+            >>> values = [1., 2., 3.]
+            >>> x = paddle.sparse.sparse_coo_tensor(indices, values, [3, 3])
+            >>> y = paddle.rand([3, 2])
+            >>> out = paddle.sparse.addmm(input, x, y, 3.0, 2.0)
     """
     return _C_ops.sparse_addmm(input, x, y, beta, alpha)
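Both rewritten examples encode the same 3x3 sparse operand, once as CSR (crows/cols/values) and once as COO (indices/values): 1.0 at [0, 1], 2.0 at [1, 2], 3.0 at [2, 0]. Given the signature in the hunk header, the positional 3.0 and 2.0 are `beta` and `alpha`, and addmm computes `beta * input + alpha * (x @ y)`. A dense NumPy sketch of the same arithmetic (assuming the standard addmm formula):

    import numpy as np
    # Dense layout of the sparse matrix used by both the CSR and COO examples.
    x_dense = np.array([[0., 1., 0.],
                        [0., 0., 2.],
                        [3., 0., 0.]])
    inp = np.random.rand(3, 2)   # stands in for paddle.rand([3, 2])
    y = np.random.rand(3, 2)
    out = 3.0 * inp + 2.0 * (x_dense @ y)   # beta * input + alpha * (x @ y)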