From 093f6ecdc9890a725e2a7c94ef397b131c4193d2 Mon Sep 17 00:00:00 2001
From: yuchen202 <103028470+yuchen202@users.noreply.github.com>
Date: Thu, 7 Sep 2023 10:03:55 +0800
Subject: [PATCH] [xdoctest] reformat example code with google style in
 No.286-290 (#56797)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

* Afternoon of 8.30

  Not sure how to change # doctest: +REQUIRES(env:GPU), please help take a look.

* Update softmax_mask_fuse.py

  Updated the related content.

* Update graph_reindex.py

* Update graph_sample_neighbors.py

  Tested all of them, and all runs were successful.

* fix cases on cpu

* fix gpu cases

---------

Co-authored-by: SigureMo
---
 .../incubate/operators/graph_reindex.py      | 68 +++++++++++--------
 .../operators/graph_sample_neighbors.py      | 29 ++++----
 .../incubate/operators/graph_send_recv.py    | 58 +++++++++-------
 .../incubate/operators/softmax_mask_fuse.py  | 17 ++---
 .../softmax_mask_fuse_upper_triangle.py      | 33 ++++++---
 5 files changed, 123 insertions(+), 82 deletions(-)

diff --git a/python/paddle/incubate/operators/graph_reindex.py b/python/paddle/incubate/operators/graph_reindex.py
index 2594ed7ce05..26f55b0fa2e 100644
--- a/python/paddle/incubate/operators/graph_reindex.py
+++ b/python/paddle/incubate/operators/graph_reindex.py
@@ -80,33 +80,47 @@ def graph_reindex(
     Examples:
         .. code-block:: python

-            import paddle
-
-            x = [0, 1, 2]
-            neighbors_e1 = [8, 9, 0, 4, 7, 6, 7]
-            count_e1 = [2, 3, 2]
-            x = paddle.to_tensor(x, dtype="int64")
-            neighbors_e1 = paddle.to_tensor(neighbors_e1, dtype="int64")
-            count_e1 = paddle.to_tensor(count_e1, dtype="int32")
-
-            reindex_src, reindex_dst, out_nodes = \
-                paddle.incubate.graph_reindex(x, neighbors_e1, count_e1)
-            # reindex_src: [3, 4, 0, 5, 6, 7, 6]
-            # reindex_dst: [0, 0, 1, 1, 1, 2, 2]
-            # out_nodes: [0, 1, 2, 8, 9, 4, 7, 6]
-
-            neighbors_e2 = [0, 2, 3, 5, 1]
-            count_e2 = [1, 3, 1]
-            neighbors_e2 = paddle.to_tensor(neighbors_e2, dtype="int64")
-            count_e2 = paddle.to_tensor(count_e2, dtype="int32")
-
-            neighbors = paddle.concat([neighbors_e1, neighbors_e2])
-            count = paddle.concat([count_e1, count_e2])
-            reindex_src, reindex_dst, out_nodes = \
-                paddle.incubate.graph_reindex(x, neighbors, count)
-            # reindex_src: [3, 4, 0, 5, 6, 7, 6, 0, 2, 8, 9, 1]
-            # reindex_dst: [0, 0, 1, 1, 1, 2, 2, 0, 1, 1, 1, 2]
-            # out_nodes: [0, 1, 2, 8, 9, 4, 7, 6, 3, 5]
+            >>> import paddle
+
+            >>> x = [0, 1, 2]
+            >>> neighbors_e1 = [8, 9, 0, 4, 7, 6, 7]
+            >>> count_e1 = [2, 3, 2]
+            >>> x = paddle.to_tensor(x, dtype="int64")
+            >>> neighbors_e1 = paddle.to_tensor(neighbors_e1, dtype="int64")
+            >>> count_e1 = paddle.to_tensor(count_e1, dtype="int32")
+
+            >>> reindex_src, reindex_dst, out_nodes = paddle.incubate.graph_reindex(
+            ...     x,
+            ...     neighbors_e1,
+            ...     count_e1,
+            ... )
+            >>> print(reindex_src)
+            Tensor(shape=[7], dtype=int64, place=Place(cpu), stop_gradient=True,
+            [3, 4, 0, 5, 6, 7, 6])
+            >>> print(reindex_dst)
+            Tensor(shape=[7], dtype=int64, place=Place(cpu), stop_gradient=True,
+            [0, 0, 1, 1, 1, 2, 2])
+            >>> print(out_nodes)
+            Tensor(shape=[8], dtype=int64, place=Place(cpu), stop_gradient=True,
+            [0, 1, 2, 8, 9, 4, 7, 6])
+
+            >>> neighbors_e2 = [0, 2, 3, 5, 1]
+            >>> count_e2 = [1, 3, 1]
+            >>> neighbors_e2 = paddle.to_tensor(neighbors_e2, dtype="int64")
+            >>> count_e2 = paddle.to_tensor(count_e2, dtype="int32")
+
+            >>> neighbors = paddle.concat([neighbors_e1, neighbors_e2])
+            >>> count = paddle.concat([count_e1, count_e2])
+            >>> reindex_src, reindex_dst, out_nodes = paddle.incubate.graph_reindex(x, neighbors, count)
+            >>> print(reindex_src)
+            Tensor(shape=[12], dtype=int64, place=Place(cpu), stop_gradient=True,
+            [3, 4, 0, 5, 6, 7, 6, 0, 2, 8, 9, 1])
+            >>> print(reindex_dst)
+            Tensor(shape=[12], dtype=int64, place=Place(cpu), stop_gradient=True,
+            [0, 0, 1, 1, 1, 2, 2, 0, 1, 1, 1, 2])
+            >>> print(out_nodes)
+            Tensor(shape=[10], dtype=int64, place=Place(cpu), stop_gradient=True,
+            [0, 1, 2, 8, 9, 4, 7, 6, 3, 5])
     """

     if flag_buffer_hashtable:
diff --git a/python/paddle/incubate/operators/graph_sample_neighbors.py b/python/paddle/incubate/operators/graph_sample_neighbors.py
index 169acca5fdc..66e8d5f2fa5 100644
--- a/python/paddle/incubate/operators/graph_sample_neighbors.py
+++ b/python/paddle/incubate/operators/graph_sample_neighbors.py
@@ -80,19 +80,22 @@ def graph_sample_neighbors(
     Examples:
         .. code-block:: python

-            import paddle
-            # edges: (3, 0), (7, 0), (0, 1), (9, 1), (1, 2), (4, 3), (2, 4),
-            #        (9, 5), (3, 5), (9, 6), (1, 6), (9, 8), (7, 8)
-            row = [3, 7, 0, 9, 1, 4, 2, 9, 3, 9, 1, 9, 7]
-            colptr = [0, 2, 4, 5, 6, 7, 9, 11, 11, 13, 13]
-            nodes = [0, 8, 1, 2]
-            sample_size = 2
-            row = paddle.to_tensor(row, dtype="int64")
-            colptr = paddle.to_tensor(colptr, dtype="int64")
-            nodes = paddle.to_tensor(nodes, dtype="int64")
-            out_neighbors, out_count = \
-                paddle.incubate.graph_sample_neighbors(row, colptr, nodes,
-                                                       sample_size=sample_size)
+            >>> import paddle
+            >>> # edges: (3, 0), (7, 0), (0, 1), (9, 1), (1, 2), (4, 3), (2, 4),
+            >>> #        (9, 5), (3, 5), (9, 6), (1, 6), (9, 8), (7, 8)
+            >>> row = [3, 7, 0, 9, 1, 4, 2, 9, 3, 9, 1, 9, 7]
+            >>> colptr = [0, 2, 4, 5, 6, 7, 9, 11, 11, 13, 13]
+            >>> nodes = [0, 8, 1, 2]
+            >>> sample_size = 2
+            >>> row = paddle.to_tensor(row, dtype="int64")
+            >>> colptr = paddle.to_tensor(colptr, dtype="int64")
+            >>> nodes = paddle.to_tensor(nodes, dtype="int64")
+            >>> out_neighbors, out_count = paddle.incubate.graph_sample_neighbors(
+            ...     row,
+            ...     colptr,
+            ...     nodes,
+            ...     sample_size=sample_size
+            ... )

     """

diff --git a/python/paddle/incubate/operators/graph_send_recv.py b/python/paddle/incubate/operators/graph_send_recv.py
index 7a874f19249..84bc656f348 100644
--- a/python/paddle/incubate/operators/graph_send_recv.py
+++ b/python/paddle/incubate/operators/graph_send_recv.py
@@ -91,30 +91,40 @@ def graph_send_recv(

         .. code-block:: python

-            import paddle
-
-            x = paddle.to_tensor([[0, 2, 3], [1, 4, 5], [2, 6, 7]], dtype="float32")
-            indexes = paddle.to_tensor([[0, 1], [1, 2], [2, 1], [0, 0]], dtype="int32")
-            src_index = indexes[:, 0]
-            dst_index = indexes[:, 1]
-            out = paddle.incubate.graph_send_recv(x, src_index, dst_index, pool_type="sum")
-            # Outputs: [[0., 2., 3.], [2., 8., 10.], [1., 4., 5.]]
-
-            x = paddle.to_tensor([[0, 2, 3], [1, 4, 5], [2, 6, 7]], dtype="float32")
-            indexes = paddle.to_tensor([[0, 1], [2, 1], [0, 0]], dtype="int32")
-            src_index = indexes[:, 0]
-            dst_index = indexes[:, 1]
-            out_size = paddle.max(dst_index) + 1
-            out = paddle.incubate.graph_send_recv(x, src_index, dst_index, pool_type="sum", out_size=out_size)
-            # Outputs: [[0., 2., 3.], [[2., 8., 10.]]]
-
-            x = paddle.to_tensor([[0, 2, 3], [1, 4, 5], [2, 6, 7]], dtype="float32")
-            indexes = paddle.to_tensor([[0, 1], [2, 1], [0, 0]], dtype="int32")
-            src_index = indexes[:, 0]
-            dst_index = indexes[:, 1]
-            out = paddle.incubate.graph_send_recv(x, src_index, dst_index, pool_type="sum")
-            # Outputs: [[0., 2., 3.], [2., 8., 10.], [0., 0., 0.]]
-
+            >>> import paddle
+
+            >>> x = paddle.to_tensor([[0, 2, 3], [1, 4, 5], [2, 6, 7]], dtype="float32")
+            >>> indexes = paddle.to_tensor([[0, 1], [1, 2], [2, 1], [0, 0]], dtype="int32")
+            >>> src_index = indexes[:, 0]
+            >>> dst_index = indexes[:, 1]
+            >>> out = paddle.incubate.graph_send_recv(x, src_index, dst_index, pool_type="sum")
+            >>> print(out)
+            Tensor(shape=[3, 3], dtype=float32, place=Place(cpu), stop_gradient=True,
+            [[0. , 2. , 3. ],
+             [2. , 8. , 10.],
+             [1. , 4. , 5. ]])
+
+            >>> x = paddle.to_tensor([[0, 2, 3], [1, 4, 5], [2, 6, 7]], dtype="float32")
+            >>> indexes = paddle.to_tensor([[0, 1], [2, 1], [0, 0]], dtype="int32")
+            >>> src_index = indexes[:, 0]
+            >>> dst_index = indexes[:, 1]
+            >>> out_size = paddle.max(dst_index) + 1
+            >>> out = paddle.incubate.graph_send_recv(x, src_index, dst_index, pool_type="sum", out_size=out_size)
+            >>> print(out)
+            Tensor(shape=[2, 3], dtype=float32, place=Place(cpu), stop_gradient=True,
+            [[0. , 2. , 3. ],
+             [2. , 8. , 10.]])
+
+            >>> x = paddle.to_tensor([[0, 2, 3], [1, 4, 5], [2, 6, 7]], dtype="float32")
+            >>> indexes = paddle.to_tensor([[0, 1], [2, 1], [0, 0]], dtype="int32")
+            >>> src_index = indexes[:, 0]
+            >>> dst_index = indexes[:, 1]
+            >>> out = paddle.incubate.graph_send_recv(x, src_index, dst_index, pool_type="sum")
+            >>> print(out)
+            Tensor(shape=[3, 3], dtype=float32, place=Place(cpu), stop_gradient=True,
+            [[0. , 2. , 3. ],
+             [2. , 8. , 10.],
+             [0. , 0. , 0. ]])
     """

     if pool_type not in ["sum", "mean", "max", "min"]:
diff --git a/python/paddle/incubate/operators/softmax_mask_fuse.py b/python/paddle/incubate/operators/softmax_mask_fuse.py
index 178cfd9a046..c9a3539ee09 100644
--- a/python/paddle/incubate/operators/softmax_mask_fuse.py
+++ b/python/paddle/incubate/operators/softmax_mask_fuse.py
@@ -40,20 +40,21 @@ def softmax_mask_fuse(x, mask, name=None):
             For more information, please refer to :ref:`api_guide_Name`.

     Returns:
-        4-D Tensor. A location into which the result is stored. It’s dimension is 4D. Has same shape with x.
+        4-D Tensor. A location into which the result is stored. Its dimension is 4D and it has the same shape as x.

     Examples:
         .. code-block:: python

-            # required: gpu
-            import paddle
-            import paddle.incubate as incubate
+            >>> # doctest: +REQUIRES(env:GPU)
+            >>> import paddle
+            >>> import paddle.incubate as incubate

-            x = paddle.rand([2, 8, 8, 32])
-            mask = paddle.rand([2, 1, 8, 32])
+            >>> x = paddle.rand([2, 8, 8, 32])
+            >>> mask = paddle.rand([2, 1, 8, 32])

-            rst = incubate.softmax_mask_fuse(x, mask)
-            # [[[[0.02404429, 0.04658398, 0.02746007, ..., 0.01489375, 0.02397441, 0.02851614] ... ]]]
+            >>> rst = incubate.softmax_mask_fuse(x, mask)
+            >>> rst.shape
+            [2, 8, 8, 32]
     """
     if in_dynamic_mode():
         out = _legacy_C_ops.fused_softmax_mask(x, mask)
diff --git a/python/paddle/incubate/operators/softmax_mask_fuse_upper_triangle.py b/python/paddle/incubate/operators/softmax_mask_fuse_upper_triangle.py
index dd8e229a1e9..ae02603f27f 100644
--- a/python/paddle/incubate/operators/softmax_mask_fuse_upper_triangle.py
+++ b/python/paddle/incubate/operators/softmax_mask_fuse_upper_triangle.py
@@ -38,22 +38,35 @@ def softmax_mask_fuse_upper_triangle(x):
            The third dimension of x must be same with the fourth dimension of x.

     Returns:
-        4-D Tensor. A location into which the result is stored. It’s dimension is 4D. Has same dimension with x.
+        4-D Tensor. A location into which the result is stored. Its dimension is 4D and it has the same dimensions as x.

     Examples:
         .. code-block:: python

-            # required: gpu
-            import paddle
-            import paddle.incubate as incubate
+            >>> # doctest: +REQUIRES(env:GPU)
+            >>> import paddle
+            >>> import paddle.incubate as incubate

-            x = paddle.rand((1, 1, 32, 32))
+            >>> paddle.seed(1)
+            >>> paddle.set_device("gpu")
+            >>> x = paddle.rand((1, 1, 32, 32))

-            rst = incubate.softmax_mask_fuse_upper_triangle(x)
-            # [[[[1. , 0. , 0. , ..., 0., 0., 0.],
-            #   [0.45324376, 0.54675621, 0. , ..., 0., 0., 0.],
-            #   [0.32674268, 0.28156221, 0.39169508, ..., 0., 0., 0.]
-            #   ... ]]]
+            >>> rst = incubate.softmax_mask_fuse_upper_triangle(x)
+            >>> print(rst)
+            Tensor(shape=[1, 1, 32, 32], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+            [[[[1. , 0. , 0. , ..., 0. ,
+                0. , 0. ],
+               [0.49575609, 0.50424391, 0. , ..., 0. ,
+                0. , 0. ],
+               [0.26035303, 0.25114325, 0.48850375, ..., 0. ,
+                0. , 0. ],
+               ...,
+               [0.04379999, 0.04194880, 0.05150032, ..., 0.02721255,
+                0. , 0. ],
+               [0.02348574, 0.01959674, 0.02609110, ..., 0.04046615,
+                0.02248267, 0. ],
+               [0.02280738, 0.03144657, 0.02892209, ..., 0.03885521,
+                0.03342311, 0.02842640]]]])
     """
     if in_dynamic_mode():
         out = _legacy_C_ops.fused_softmax_mask_upper_triangle(x)
--
GitLab
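A note on the # doctest: +REQUIRES(env:GPU) directive that this patch adds (and that the commit message asks about): under xdoctest, the tool named in the [xdoctest] subject tag, a guarded example is still collected but is reported as skipped unless the named environment variable is set (for example GPU=1). The sketch below is illustrative only and is not part of the patch; the function name fused_op_demo and the direct use of xdoctest.doctest_module as a runner are assumptions for demonstration.

.. code-block:: python

    # Illustrative sketch (not part of the patch): how a REQUIRES-guarded
    # example behaves when a docstring is checked with xdoctest.
    def fused_op_demo():
        """
        Examples:
            .. code-block:: python

                >>> # doctest: +REQUIRES(env:GPU)
                >>> import paddle
                >>> x = paddle.rand([2, 8, 8, 32])
                >>> x.shape
                [2, 8, 8, 32]
        """


    if __name__ == "__main__":
        # Run every doctest found in this file. The guarded example above is
        # skipped unless the GPU environment variable is set, e.g. GPU=1.
        import xdoctest

        xdoctest.doctest_module(__file__, command="all")

Running the file directly (or via python -m xdoctest on it) with GPU=1 exported executes the guarded example; without it, the example is skipped rather than failed, which is why the converted GPU examples above can pass on CPU-only CI.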