Unverified commit 72a2af48, authored by 李季, committed by GitHub

Fix the docs bug in collective file. (#34374)

* fix the docs bug in collective file
Parent 539d7185
...@@ -1341,6 +1341,7 @@ def split(x,
    Examples:
        .. code-block:: python

            # required: distributed
            import paddle
            import paddle.distributed.fleet as fleet
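The split example above is truncated by the diff. For context, a minimal sketch of how it might continue, assuming the (size, operation, num_partitions) parameters of paddle.distributed.split and a 2-GPU job started with paddle.distributed.launch; none of these details are shown in this diff:

    # Sketch only, not part of this commit: an embedding table split across 2 ranks.
    import paddle
    import paddle.distributed.fleet as fleet

    paddle.enable_static()
    paddle.set_device('gpu:%d' % paddle.distributed.ParallelEnv().dev_id)
    fleet.init(is_collective=True)
    data = paddle.randint(0, 8, shape=[10, 4])
    emb_out = paddle.distributed.split(
        data, (8, 8), operation="embedding", num_partitions=2)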
...@@ -1456,6 +1457,7 @@ def split(x,
def alltoall(in_tensor_list, out_tensor_list, group=None, use_calc_stream=True):
    """
    Scatter tensors in in_tensor_list to all participators and gather the result tensors in out_tensor_list.

    Args:
        in_tensor_list (list): A list of input Tensors. Every element in the list must be a Tensor whose data type
            should be float16, float32, float64, int32 or int64.
...@@ -1463,14 +1465,18 @@ def alltoall(in_tensor_list, out_tensor_list, group=None, use_calc_stream=True):
            data type of the input Tensors.
        group (Group, optional): The group instance returned by new_group or None for the global default group. Default: None.
        use_calc_stream (bool, optional): Whether to use calculation stream (True) or communication stream. Default: True.
    Returns:
        None.

    Examples:
        .. code-block:: python

            # required: distributed
            import numpy as np
            import paddle
            from paddle.distributed import init_parallel_env

            init_parallel_env()
            out_tensor_list = []
            if paddle.distributed.ParallelEnv().rank == 0:
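The alltoall example is also cut off at the rank check. A possible completion, assuming two ranks started with paddle.distributed.launch (the tensor values are illustrative, not taken from this diff):

    # Sketch only: each of the 2 ranks scatters its two tensors and gathers
    # one tensor from every rank into out_tensor_list.
    import numpy as np
    import paddle
    from paddle.distributed import init_parallel_env

    init_parallel_env()
    out_tensor_list = []
    if paddle.distributed.ParallelEnv().rank == 0:
        data1 = paddle.to_tensor(np.array([[1, 2, 3], [4, 5, 6]]))
        data2 = paddle.to_tensor(np.array([[7, 8, 9], [10, 11, 12]]))
    else:
        data1 = paddle.to_tensor(np.array([[13, 14, 15], [16, 17, 18]]))
        data2 = paddle.to_tensor(np.array([[19, 20, 21], [22, 23, 24]]))
    paddle.distributed.alltoall([data1, data2], out_tensor_list)
    # rank 0 gathers [[1, 2, 3], [4, 5, 6]] and [[13, 14, 15], [16, 17, 18]];
    # rank 1 gathers [[7, 8, 9], [10, 11, 12]] and [[19, 20, 21], [22, 23, 24]].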
...@@ -1535,14 +1541,17 @@ def send(tensor, dst=0, group=None, use_calc_stream=True):
        dst (int): The destination rank id.
        group (Group, optional): The group instance returned by new_group or None for the global default group. Default: None.
        use_calc_stream (bool, optional): Whether to use calculation stream or communication stream. Default: True.
    Returns:
        None.

    Examples:
        .. code-block:: python

            # required: distributed
            import paddle
            from paddle.distributed import init_parallel_env

            init_parallel_env()
            if paddle.distributed.ParallelEnv().rank == 0:
                data = paddle.to_tensor([7, 8, 9])
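send is point-to-point, so it only makes sense paired with a matching recv on the destination rank. A minimal sketch of that pairing for two ranks, assuming a paddle.distributed.launch start (tensor values are illustrative):

    # Sketch only: rank 0 sends, rank 1 receives into its own tensor.
    import paddle
    from paddle.distributed import init_parallel_env

    init_parallel_env()
    if paddle.distributed.ParallelEnv().rank == 0:
        data = paddle.to_tensor([7, 8, 9])
        paddle.distributed.send(data, dst=1)   # dst is the destination rank id
    else:
        data = paddle.to_tensor([1, 2, 3])
        paddle.distributed.recv(data, src=0)   # overwritten with [7, 8, 9]
    out = data.numpy()                         # both ranks now hold [7, 8, 9]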
...@@ -1585,14 +1594,17 @@ def recv(tensor, src=0, group=None, use_calc_stream=True):
        src (int): The source rank id.
        group (Group, optional): The group instance returned by new_group or None for the global default group. Default: None.
        use_calc_stream (bool, optional): Whether to use calculation stream or communication stream. Default: True.
    Returns:
        None.

    Examples:
        .. code-block:: python

            # required: distributed
            import paddle
            from paddle.distributed import init_parallel_env

            init_parallel_env()
            if paddle.distributed.ParallelEnv().rank == 0:
                data = paddle.to_tensor([7, 8, 9])
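On the receiving side, recv writes into the tensor that is passed in, so the destination rank preallocates a buffer with the sender's shape and dtype. A sketch under the same 2-rank assumption:

    # Sketch only: preallocate the receive buffer instead of reusing real data.
    import paddle
    from paddle.distributed import init_parallel_env

    init_parallel_env()
    if paddle.distributed.ParallelEnv().rank == 0:
        paddle.distributed.send(paddle.to_tensor([7, 8, 9]), dst=1)
    else:
        buf = paddle.zeros([3], dtype='int64')   # matches the sent shape/dtype
        paddle.distributed.recv(buf, src=0)      # buf now holds [7, 8, 9]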
...