Unverified commit 093f6ecd authored by yuchen202, committed by GitHub

[xdoctest] reformat example code with google style in No.286-290 (#56797)

* Aug 30, afternoon

I'm not sure how to change "# doctest: +REQUIRES(env:GPU)"; please help take a look.

* Update softmax_mask_fuse.py

Updated the related content.

* Update graph_reindex.py

* Update graph_sample_neighbors.py

Ran every example; all of them were successful.

* fix cases on cpu

* fix gpu cases

---------
Co-authored-by: SigureMo <sigure.qaq@gmail.com>
Parent e7246bb0
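Regarding the question in the commit message about "# doctest: +REQUIRES(env:GPU)": in the reformatted examples below, GPU-only doctests carry this xdoctest directive on the first prompt line, so the doc runner skips the example unless a GPU environment is available. A minimal sketch of the target docstring form, with a made-up function name used purely for illustration:

    def some_gpu_only_op(x):
        """Stand-in for a fused GPU op; only the docstring layout matters here.

        Examples:
            .. code-block:: python

                >>> # doctest: +REQUIRES(env:GPU)
                >>> import paddle
                >>> paddle.set_device("gpu")
                >>> x = paddle.rand([2, 8, 8, 32])
                >>> y = some_gpu_only_op(x)
                >>> y.shape
                [2, 8, 8, 32]
        """
        return 2 * x  # placeholder body, not a real kernel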
@@ -80,33 +80,47 @@ def graph_reindex(
     Examples:
         .. code-block:: python

-            import paddle
-            x = [0, 1, 2]
-            neighbors_e1 = [8, 9, 0, 4, 7, 6, 7]
-            count_e1 = [2, 3, 2]
-            x = paddle.to_tensor(x, dtype="int64")
-            neighbors_e1 = paddle.to_tensor(neighbors_e1, dtype="int64")
-            count_e1 = paddle.to_tensor(count_e1, dtype="int32")
-            reindex_src, reindex_dst, out_nodes = \
-                paddle.incubate.graph_reindex(x, neighbors_e1, count_e1)
-            # reindex_src: [3, 4, 0, 5, 6, 7, 6]
-            # reindex_dst: [0, 0, 1, 1, 1, 2, 2]
-            # out_nodes: [0, 1, 2, 8, 9, 4, 7, 6]
-
-            neighbors_e2 = [0, 2, 3, 5, 1]
-            count_e2 = [1, 3, 1]
-            neighbors_e2 = paddle.to_tensor(neighbors_e2, dtype="int64")
-            count_e2 = paddle.to_tensor(count_e2, dtype="int32")
-
-            neighbors = paddle.concat([neighbors_e1, neighbors_e2])
-            count = paddle.concat([count_e1, count_e2])
-            reindex_src, reindex_dst, out_nodes = \
-                paddle.incubate.graph_reindex(x, neighbors, count)
-            # reindex_src: [3, 4, 0, 5, 6, 7, 6, 0, 2, 8, 9, 1]
-            # reindex_dst: [0, 0, 1, 1, 1, 2, 2, 0, 1, 1, 1, 2]
-            # out_nodes: [0, 1, 2, 8, 9, 4, 7, 6, 3, 5]
+            >>> import paddle
+            >>> x = [0, 1, 2]
+            >>> neighbors_e1 = [8, 9, 0, 4, 7, 6, 7]
+            >>> count_e1 = [2, 3, 2]
+            >>> x = paddle.to_tensor(x, dtype="int64")
+            >>> neighbors_e1 = paddle.to_tensor(neighbors_e1, dtype="int64")
+            >>> count_e1 = paddle.to_tensor(count_e1, dtype="int32")
+            >>> reindex_src, reindex_dst, out_nodes = paddle.incubate.graph_reindex(
+            ...     x,
+            ...     neighbors_e1,
+            ...     count_e1,
+            ... )
+            >>> print(reindex_src)
+            Tensor(shape=[7], dtype=int64, place=Place(cpu), stop_gradient=True,
+            [3, 4, 0, 5, 6, 7, 6])
+            >>> print(reindex_dst)
+            Tensor(shape=[7], dtype=int64, place=Place(cpu), stop_gradient=True,
+            [0, 0, 1, 1, 1, 2, 2])
+            >>> print(out_nodes)
+            Tensor(shape=[8], dtype=int64, place=Place(cpu), stop_gradient=True,
+            [0, 1, 2, 8, 9, 4, 7, 6])

+            >>> neighbors_e2 = [0, 2, 3, 5, 1]
+            >>> count_e2 = [1, 3, 1]
+            >>> neighbors_e2 = paddle.to_tensor(neighbors_e2, dtype="int64")
+            >>> count_e2 = paddle.to_tensor(count_e2, dtype="int32")
+            >>> neighbors = paddle.concat([neighbors_e1, neighbors_e2])
+            >>> count = paddle.concat([count_e1, count_e2])
+            >>> reindex_src, reindex_dst, out_nodes = paddle.incubate.graph_reindex(x, neighbors, count)
+            >>> print(reindex_src)
+            Tensor(shape=[12], dtype=int64, place=Place(cpu), stop_gradient=True,
+            [3, 4, 0, 5, 6, 7, 6, 0, 2, 8, 9, 1])
+            >>> print(reindex_dst)
+            Tensor(shape=[12], dtype=int64, place=Place(cpu), stop_gradient=True,
+            [0, 0, 1, 1, 1, 2, 2, 0, 1, 1, 1, 2])
+            >>> print(out_nodes)
+            Tensor(shape=[10], dtype=int64, place=Place(cpu), stop_gradient=True,
+            [0, 1, 2, 8, 9, 4, 7, 6, 3, 5])
     """
     if flag_buffer_hashtable:
...
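The reindexing these doctests check can be reproduced with a small pure-Python sketch (an illustration of the mapping, not Paddle's implementation): nodes already in x keep their positions, each previously unseen neighbor id gets the next fresh local id in order of first appearance, and the destination index repeats position i for count[i] neighbors, wrapping over len(x) when several edge types are concatenated as in the second doctest.

    def reindex(x, neighbors, count):
        # local id for every node already in x
        id_map = {node: i for i, node in enumerate(x)}
        out_nodes = list(x)
        reindex_src = []
        for n in neighbors:
            if n not in id_map:                # first time this neighbor appears
                id_map[n] = len(out_nodes)     # assign the next free local id
                out_nodes.append(n)
            reindex_src.append(id_map[n])
        # the i-th block of count[i] neighbors belongs to node x[i % len(x)]
        reindex_dst = [i % len(x) for i, c in enumerate(count) for _ in range(c)]
        return reindex_src, reindex_dst, out_nodes

    # matches the first doctest above
    assert reindex([0, 1, 2], [8, 9, 0, 4, 7, 6, 7], [2, 3, 2]) == (
        [3, 4, 0, 5, 6, 7, 6],
        [0, 0, 1, 1, 1, 2, 2],
        [0, 1, 2, 8, 9, 4, 7, 6],
    )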
@@ -80,19 +80,22 @@ def graph_sample_neighbors(
     Examples:
         .. code-block:: python

-            import paddle
-            # edges: (3, 0), (7, 0), (0, 1), (9, 1), (1, 2), (4, 3), (2, 4),
-            #        (9, 5), (3, 5), (9, 6), (1, 6), (9, 8), (7, 8)
-            row = [3, 7, 0, 9, 1, 4, 2, 9, 3, 9, 1, 9, 7]
-            colptr = [0, 2, 4, 5, 6, 7, 9, 11, 11, 13, 13]
-            nodes = [0, 8, 1, 2]
-            sample_size = 2
-            row = paddle.to_tensor(row, dtype="int64")
-            colptr = paddle.to_tensor(colptr, dtype="int64")
-            nodes = paddle.to_tensor(nodes, dtype="int64")
-            out_neighbors, out_count = \
-                paddle.incubate.graph_sample_neighbors(row, colptr, nodes,
-                                                       sample_size=sample_size)
+            >>> import paddle
+            >>> # edges: (3, 0), (7, 0), (0, 1), (9, 1), (1, 2), (4, 3), (2, 4),
+            >>> # (9, 5), (3, 5), (9, 6), (1, 6), (9, 8), (7, 8)
+            >>> row = [3, 7, 0, 9, 1, 4, 2, 9, 3, 9, 1, 9, 7]
+            >>> colptr = [0, 2, 4, 5, 6, 7, 9, 11, 11, 13, 13]
+            >>> nodes = [0, 8, 1, 2]
+            >>> sample_size = 2
+            >>> row = paddle.to_tensor(row, dtype="int64")
+            >>> colptr = paddle.to_tensor(colptr, dtype="int64")
+            >>> nodes = paddle.to_tensor(nodes, dtype="int64")
+            >>> out_neighbors, out_count = paddle.incubate.graph_sample_neighbors(
+            ...     row,
+            ...     colptr,
+            ...     nodes,
+            ...     sample_size=sample_size
+            ... )
     """
...
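In the example above, row and colptr are the CSC layout of the graph: row[colptr[i]:colptr[i+1]] lists the in-neighbors of node i, and sampling keeps at most sample_size of them per queried node. A pure-Python sketch of that behavior (illustration only, not Paddle's kernel):

    import random

    row = [3, 7, 0, 9, 1, 4, 2, 9, 3, 9, 1, 9, 7]
    colptr = [0, 2, 4, 5, 6, 7, 9, 11, 11, 13, 13]

    def neighbors_of(node):
        # CSC slice: in-neighbors of `node`
        return row[colptr[node]:colptr[node + 1]]

    def sample_neighbors(nodes, sample_size):
        out_neighbors, out_count = [], []
        for n in nodes:
            nbrs = neighbors_of(n)
            picked = nbrs if len(nbrs) <= sample_size else random.sample(nbrs, sample_size)
            out_neighbors.extend(picked)
            out_count.append(len(picked))
        return out_neighbors, out_count

    assert neighbors_of(0) == [3, 7]   # edges (3, 0) and (7, 0) from the comment above
    assert neighbors_of(8) == [9, 7]   # edges (9, 8) and (7, 8)
    print(sample_neighbors([0, 8, 1, 2], sample_size=2))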
@@ -91,30 +91,40 @@ def graph_send_recv(
         .. code-block:: python

-            import paddle
-            x = paddle.to_tensor([[0, 2, 3], [1, 4, 5], [2, 6, 7]], dtype="float32")
-            indexes = paddle.to_tensor([[0, 1], [1, 2], [2, 1], [0, 0]], dtype="int32")
-            src_index = indexes[:, 0]
-            dst_index = indexes[:, 1]
-            out = paddle.incubate.graph_send_recv(x, src_index, dst_index, pool_type="sum")
-            # Outputs: [[0., 2., 3.], [2., 8., 10.], [1., 4., 5.]]
-
-            x = paddle.to_tensor([[0, 2, 3], [1, 4, 5], [2, 6, 7]], dtype="float32")
-            indexes = paddle.to_tensor([[0, 1], [2, 1], [0, 0]], dtype="int32")
-            src_index = indexes[:, 0]
-            dst_index = indexes[:, 1]
-            out_size = paddle.max(dst_index) + 1
-            out = paddle.incubate.graph_send_recv(x, src_index, dst_index, pool_type="sum", out_size=out_size)
-            # Outputs: [[0., 2., 3.], [[2., 8., 10.]]]
-
-            x = paddle.to_tensor([[0, 2, 3], [1, 4, 5], [2, 6, 7]], dtype="float32")
-            indexes = paddle.to_tensor([[0, 1], [2, 1], [0, 0]], dtype="int32")
-            src_index = indexes[:, 0]
-            dst_index = indexes[:, 1]
-            out = paddle.incubate.graph_send_recv(x, src_index, dst_index, pool_type="sum")
-            # Outputs: [[0., 2., 3.], [2., 8., 10.], [0., 0., 0.]]
+            >>> import paddle
+            >>> x = paddle.to_tensor([[0, 2, 3], [1, 4, 5], [2, 6, 7]], dtype="float32")
+            >>> indexes = paddle.to_tensor([[0, 1], [1, 2], [2, 1], [0, 0]], dtype="int32")
+            >>> src_index = indexes[:, 0]
+            >>> dst_index = indexes[:, 1]
+            >>> out = paddle.incubate.graph_send_recv(x, src_index, dst_index, pool_type="sum")
+            >>> print(out)
+            Tensor(shape=[3, 3], dtype=float32, place=Place(cpu), stop_gradient=True,
+            [[0. , 2. , 3. ],
+             [2. , 8. , 10.],
+             [1. , 4. , 5. ]])

+            >>> x = paddle.to_tensor([[0, 2, 3], [1, 4, 5], [2, 6, 7]], dtype="float32")
+            >>> indexes = paddle.to_tensor([[0, 1], [2, 1], [0, 0]], dtype="int32")
+            >>> src_index = indexes[:, 0]
+            >>> dst_index = indexes[:, 1]
+            >>> out_size = paddle.max(dst_index) + 1
+            >>> out = paddle.incubate.graph_send_recv(x, src_index, dst_index, pool_type="sum", out_size=out_size)
+            >>> print(out)
+            Tensor(shape=[2, 3], dtype=float32, place=Place(cpu), stop_gradient=True,
+            [[0. , 2. , 3. ],
+             [2. , 8. , 10.]])

+            >>> x = paddle.to_tensor([[0, 2, 3], [1, 4, 5], [2, 6, 7]], dtype="float32")
+            >>> indexes = paddle.to_tensor([[0, 1], [2, 1], [0, 0]], dtype="int32")
+            >>> src_index = indexes[:, 0]
+            >>> dst_index = indexes[:, 1]
+            >>> out = paddle.incubate.graph_send_recv(x, src_index, dst_index, pool_type="sum")
+            >>> print(out)
+            Tensor(shape=[3, 3], dtype=float32, place=Place(cpu), stop_gradient=True,
+            [[0. , 2. , 3. ],
+             [2. , 8. , 10.],
+             [0. , 0. , 0. ]])
     """
     if pool_type not in ["sum", "mean", "max", "min"]:
...
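The "sum" reduction exercised by these doctests is a gather followed by a scatter-add: row x[src_index[k]] is accumulated into output row dst_index[k], and rows that receive nothing stay zero (which is why the third example ends with an all-zero row). A pure-Python sketch, illustration only, that reproduces the first output above:

    def send_recv_sum(x, src_index, dst_index, out_size=None):
        num_rows = out_size if out_size is not None else len(x)
        out = [[0.0] * len(x[0]) for _ in range(num_rows)]
        for s, d in zip(src_index, dst_index):
            # scatter-add the gathered source row into its destination row
            out[d] = [acc + v for acc, v in zip(out[d], x[s])]
        return out

    x = [[0, 2, 3], [1, 4, 5], [2, 6, 7]]
    # first doctest above: src_index = [0, 1, 2, 0], dst_index = [1, 2, 1, 0]
    assert send_recv_sum(x, [0, 1, 2, 0], [1, 2, 1, 0]) == [
        [0.0, 2.0, 3.0],
        [2.0, 8.0, 10.0],
        [1.0, 4.0, 5.0],
    ]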
@@ -40,20 +40,21 @@ def softmax_mask_fuse(x, mask, name=None):
             For more information, please refer to :ref:`api_guide_Name`.
     Returns:
         4-D Tensor. A location into which the result is stored. Its dimension is 4D. Has same shape with x.
     Examples:
         .. code-block:: python

-            # required: gpu
-            import paddle
-            import paddle.incubate as incubate
-            x = paddle.rand([2, 8, 8, 32])
-            mask = paddle.rand([2, 1, 8, 32])
-            rst = incubate.softmax_mask_fuse(x, mask)
-            # [[[[0.02404429, 0.04658398, 0.02746007, ..., 0.01489375, 0.02397441, 0.02851614] ... ]]]
+            >>> # doctest: +REQUIRES(env:GPU)
+            >>> import paddle
+            >>> import paddle.incubate as incubate
+            >>> x = paddle.rand([2, 8, 8, 32])
+            >>> mask = paddle.rand([2, 1, 8, 32])
+            >>> rst = incubate.softmax_mask_fuse(x, mask)
+            >>> rst.shape
+            [2, 8, 8, 32]
     """
     if in_dynamic_mode():
         out = _legacy_C_ops.fused_softmax_mask(x, mask)
...
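Since the reformatted example above only checks the output shape, a CPU-side reference may help readers see what the fused op computes: the op is described as fusing the masked softmax, i.e. a softmax over the last axis of x + mask, into a single GPU kernel. A hedged, unfused sketch of that equivalent (illustration only):

    import paddle
    import paddle.nn.functional as F

    x = paddle.rand([2, 8, 8, 32])
    mask = paddle.rand([2, 1, 8, 32])         # broadcasts over the second (head) dimension
    reference = F.softmax(x + mask, axis=-1)  # unfused equivalent of softmax_mask_fuse(x, mask)
    print(reference.shape)                    # [2, 8, 8, 32], the shape the doctest checks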
@@ -38,22 +38,35 @@ def softmax_mask_fuse_upper_triangle(x):
               The third dimension of x must be same with the fourth dimension of x.
     Returns:
         4-D Tensor. A location into which the result is stored. Its dimension is 4D. Has same dimension with x.
     Examples:
         .. code-block:: python

-            # required: gpu
-            import paddle
-            import paddle.incubate as incubate
-            x = paddle.rand((1, 1, 32, 32))
-
-            rst = incubate.softmax_mask_fuse_upper_triangle(x)
-            # [[[[1. , 0. , 0. , ..., 0., 0., 0.],
-            #    [0.45324376, 0.54675621, 0. , ..., 0., 0., 0.],
-            #    [0.32674268, 0.28156221, 0.39169508, ..., 0., 0., 0.]
-            #    ... ]]]
+            >>> # doctest: +REQUIRES(env:GPU)
+            >>> import paddle
+            >>> import paddle.incubate as incubate
+            >>> paddle.seed(1)
+            >>> paddle.set_device("gpu")
+            >>> x = paddle.rand((1, 1, 32, 32))
+            >>> rst = incubate.softmax_mask_fuse_upper_triangle(x)
+            >>> print(rst)
+            Tensor(shape=[1, 1, 32, 32], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+            [[[[1. , 0. , 0. , ..., 0. ,
+                0. , 0. ],
+               [0.49575609, 0.50424391, 0. , ..., 0. ,
+                0. , 0. ],
+               [0.26035303, 0.25114325, 0.48850375, ..., 0. ,
+                0. , 0. ],
+               ...,
+               [0.04379999, 0.04194880, 0.05150032, ..., 0.02721255,
+                0. , 0. ],
+               [0.02348574, 0.01959674, 0.02609110, ..., 0.04046615,
+                0.02248267, 0. ],
+               [0.02280738, 0.03144657, 0.02892209, ..., 0.03885521,
+                0.03342311, 0.02842640]]]])
     """
     if in_dynamic_mode():
         out = _legacy_C_ops.fused_softmax_mask_upper_triangle(x)
...
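The printed tensor above shows the causal pattern the op enforces: in the last two dimensions, row i keeps only columns 0 through i (the upper triangle is masked away) before the softmax over the last axis, which is why the first row is exactly [1., 0., 0., ...]. A hedged, unfused CPU sketch of that behavior (illustration only, not the fused kernel; the random values will differ from the GPU doctest):

    import paddle
    import paddle.nn.functional as F

    paddle.seed(1)
    x = paddle.rand((1, 1, 32, 32))
    seq_len = x.shape[-1]
    # -1e9 stands in for -inf on the masked (upper-triangle) positions
    neg_inf_mask = (1.0 - paddle.tril(paddle.ones((seq_len, seq_len)))) * -1e9
    reference = F.softmax(x + neg_inf_mask, axis=-1)
    print(reference[0, 0, 0])   # first row: [1., 0., 0., ...], matching the doctest pattern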