diff --git a/python/paddle/distributed/collective.py b/python/paddle/distributed/collective.py
index df6df856222299f7ff0751086aedd344c020f8bf..f1dcf55a56a9656ee3ccc34caa63f1499ad6bd40 100644
--- a/python/paddle/distributed/collective.py
+++ b/python/paddle/distributed/collective.py
@@ -1341,6 +1341,7 @@ def split(x,
     Examples:
         .. code-block:: python
 
+            # required: distributed
             import paddle
             import paddle.distributed.fleet as fleet
 
@@ -1456,6 +1457,7 @@ def split(x,
 def alltoall(in_tensor_list, out_tensor_list, group=None, use_calc_stream=True):
     """
     Scatter tensors in in_tensor_list to all participators and gather the result tensors in out_tensor_list.
+
     Args:
         in_tensor_list (list): A list of input Tensors. Every element in the list must be a Tensor whose data type
             should be float16, float32, float64, int32 or int64.
@@ -1463,14 +1465,18 @@ def alltoall(in_tensor_list, out_tensor_list, group=None, use_calc_stream=True):
             data type of the input Tensors.
         group (Group, optional): The group instance return by new_group or None for global default group. Default: None.
         use_calc_stream (bool, optional): Wether to use calculation stream (True) or communication stream. Default: True.
+
     Returns:
         None.
+
     Examples:
         .. code-block:: python
 
+            # required: distributed
             import numpy as np
             import paddle
             from paddle.distributed import init_parallel_env
+
             init_parallel_env()
             out_tensor_list = []
             if paddle.distributed.ParallelEnv().rank == 0:
@@ -1535,14 +1541,17 @@ def send(tensor, dst=0, group=None, use_calc_stream=True):
         dst (int): The destination rank id.
         group (Group, optional): The group instance return by new_group or None for global default group. Default: None.
         use_calc_stream (bool, optional): Whether to use calculate stream or communication stream. Default: True.
+
     Returns:
         None.
 
     Examples:
         .. code-block:: python
 
+            # required: distributed
             import paddle
             from paddle.distributed import init_parallel_env
+
             init_parallel_env()
             if paddle.distributed.ParallelEnv().rank == 0:
                 data = paddle.to_tensor([7, 8, 9])
@@ -1585,14 +1594,17 @@ def recv(tensor, src=0, group=None, use_calc_stream=True):
         src (int): The source rank id.
         group (Group, optional): The group instance return by new_group or None for global default group. Default: None.
         use_calc_stream (bool, optional): Whether to use calculate stream or communication stream. Default: True.
+
     Returns:
         None.
 
     Examples:
         .. code-block:: python
 
+            # required: distributed
             import paddle
             from paddle.distributed import init_parallel_env
+
             init_parallel_env()
             if paddle.distributed.ParallelEnv().rank == 0:
                 data = paddle.to_tensor([7, 8, 9])
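
Note (not part of the patch): the "# required: distributed" marker added above indicates that these sample codes only run in a multi-process distributed environment. A minimal sketch of how the send/recv example touched by this diff is expected to be exercised, assuming Paddle 2.x and a hypothetical script name send_recv_demo.py launched with paddle.distributed.launch:

    # send_recv_demo.py -- a minimal sketch, not part of the patch above.
    # Launch with at least two processes, e.g.:
    #   python -m paddle.distributed.launch send_recv_demo.py
    import paddle
    from paddle.distributed import init_parallel_env

    init_parallel_env()
    if paddle.distributed.ParallelEnv().rank == 0:
        # rank 0 sends its tensor to rank 1
        data = paddle.to_tensor([7, 8, 9])
        paddle.distributed.send(data, dst=1)
    else:
        # rank 1 receives into its tensor in place
        data = paddle.to_tensor([1, 2, 3])
        paddle.distributed.recv(data, src=0)
    print(data)  # both ranks end up holding [7, 8, 9]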