diff --git a/paddle/fluid/pybind/op_function_generator.cc b/paddle/fluid/pybind/op_function_generator.cc
index a340d7a0f00d975def3a5b437529ffc641fa9487..bf3c77843219c75f9cf4a75f340eaa71f972991d 100644
--- a/paddle/fluid/pybind/op_function_generator.cc
+++ b/paddle/fluid/pybind/op_function_generator.cc
@@ -44,7 +44,6 @@ std::map<std::string, std::vector<std::string>> op_ins_map = {
     {"gru_unit", {"Input", "HiddenPrev", "Weight", "Bias"}},
     {"label_smooth", {"X", "PriorDist"}},
     {"assign", {"X"}},
-    {"send_v2", {"X"}},
     {"reshape2", {"X", "Shape"}},
     {"expand", {"X", "ExpandTimes"}},
     {"slice", {"Input", "StartsTensor", "EndsTensor"}},
diff --git a/python/paddle/distributed/collective.py b/python/paddle/distributed/collective.py
index 7aa765ba93fbe34b09c92ea55b700f3337440554..bd7f5e5733bf5016511b591c5036d77906587320 100644
--- a/python/paddle/distributed/collective.py
+++ b/python/paddle/distributed/collective.py
@@ -1258,23 +1258,24 @@ def send(tensor, dst=0, group=None, use_calc_stream=True):
         tensor (Tensor): The Tensor to send. Its data type
             should be float16, float32, float64, int32 or int64.
         dst (int): The destination rank id.
-        group (Group): The group instance return by new_group or None for global default group.
-        use_calc_stream (bool): Whether to use calculate stream or communication stream.
+        group (Group, optional): The group instance return by new_group or None for global default group. Default: None.
+        use_calc_stream (bool, optional): Whether to use calculate stream or communication stream. Default: True.
     Returns:
         None.
 
     Examples:
         .. code-block:: python
+            # required: distributed
             import paddle
-            #from paddle.distributed import init_parallel_env
-            #init_parallel_env()
-            #if paddle.distributed.ParallelEnv().rank == 0:
-            #    data = paddle.to_tensor([7, 8, 9])
-            #    paddle.distributed.send(data, dst=1)
-            #else:
-            #    data = paddle.to_tensor([1,2,3])
-            #    paddle.distributed.recv(data, src=0)
-            #out = data.numpy()
+            from paddle.distributed import init_parallel_env
+            init_parallel_env()
+            if paddle.distributed.ParallelEnv().rank == 0:
+                data = paddle.to_tensor([7, 8, 9])
+                paddle.distributed.send(data, dst=1)
+            else:
+                data = paddle.to_tensor([1,2,3])
+                paddle.distributed.recv(data, src=0)
+            out = data.numpy()
     """
     if group is not None and not group.is_member():
         return
@@ -1307,23 +1308,24 @@ def recv(tensor, src=0, group=None, use_calc_stream=True):
         tensor (Tensor): The Tensor to receive. Its data type
             should be float16, float32, float64, int32 or int64.
         src (int): The source rank id.
-        group (Group): The group instance return by new_group or None for global default group.
-        use_calc_stream (bool): Whether to use calculate stream or communication stream.
+        group (Group, optional): The group instance return by new_group or None for global default group. Default: None.
+        use_calc_stream (bool, optional): Whether to use calculate stream or communication stream. Default: True.
     Returns:
         None.
 
     Examples:
         .. code-block:: python
+            # required: distributed
             import paddle
-            #from paddle.distributed import init_parallel_env
-            #init_parallel_env()
-            #if paddle.distributed.ParallelEnv().rank == 0:
-            #    data = paddle.to_tensor([7, 8, 9])
-            #    paddle.distributed.send(data, dst=1)
-            #else:
-            #    data = paddle.to_tensor([1,2,3])
-            #    paddle.distributed.recv(data, src=0)
-            #out = data.numpy()
+            from paddle.distributed import init_parallel_env
+            init_parallel_env()
+            if paddle.distributed.ParallelEnv().rank == 0:
+                data = paddle.to_tensor([7, 8, 9])
+                paddle.distributed.send(data, dst=1)
+            else:
+                data = paddle.to_tensor([1,2,3])
+                paddle.distributed.recv(data, src=0)
+            out = data.numpy()
     """
     if group is not None and not group.is_member():
         return
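Note (not part of the diff): the now-uncommented send/recv example in the docstrings above is a two-rank program, so the if/else branches run in separate processes. A minimal sketch of how that example would typically be exercised, assuming a machine with two visible GPUs and the standard paddle.distributed.launch entry point; the script name send_recv_demo.py is hypothetical:

    # send_recv_demo.py -- hypothetical script mirroring the docstring example above
    import paddle
    from paddle.distributed import init_parallel_env

    init_parallel_env()  # initialize the default communication group for this process
    if paddle.distributed.ParallelEnv().rank == 0:
        data = paddle.to_tensor([7, 8, 9])
        paddle.distributed.send(data, dst=1)  # point-to-point send to rank 1
    else:
        data = paddle.to_tensor([1, 2, 3])
        paddle.distributed.recv(data, src=0)  # tensor is overwritten in place with [7, 8, 9]
    out = data.numpy()

    # Assumed launch command (one process per GPU):
    #   python -m paddle.distributed.launch --gpus "0,1" send_recv_demo.py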