From a259076dd01801e2e619237da02235a4856a96bb Mon Sep 17 00:00:00 2001
From: lilong12
Date: Wed, 5 May 2021 09:31:44 +0800
Subject: [PATCH] update, test=develop (#32726)

---
 paddle/fluid/pybind/op_function_generator.cc |  1 -
 python/paddle/distributed/collective.py      | 46 ++++++++++----------
 2 files changed, 24 insertions(+), 23 deletions(-)

diff --git a/paddle/fluid/pybind/op_function_generator.cc b/paddle/fluid/pybind/op_function_generator.cc
index a340d7a0f00..bf3c7784321 100644
--- a/paddle/fluid/pybind/op_function_generator.cc
+++ b/paddle/fluid/pybind/op_function_generator.cc
@@ -44,7 +44,6 @@ std::map<std::string, std::set<std::string>> op_ins_map = {
     {"gru_unit", {"Input", "HiddenPrev", "Weight", "Bias"}},
     {"label_smooth", {"X", "PriorDist"}},
     {"assign", {"X"}},
-    {"send_v2", {"X"}},
     {"reshape2", {"X", "Shape"}},
     {"expand", {"X", "ExpandTimes"}},
     {"slice", {"Input", "StartsTensor", "EndsTensor"}},
diff --git a/python/paddle/distributed/collective.py b/python/paddle/distributed/collective.py
index 7aa765ba93f..bd7f5e5733b 100644
--- a/python/paddle/distributed/collective.py
+++ b/python/paddle/distributed/collective.py
@@ -1258,23 +1258,24 @@ def send(tensor, dst=0, group=None, use_calc_stream=True):
         tensor (Tensor): The Tensor to send. Its data type
             should be float16, float32, float64, int32 or int64.
         dst (int): The destination rank id.
-        group (Group): The group instance return by new_group or None for global default group.
-        use_calc_stream (bool): Whether to use calculate stream or communication stream.
+        group (Group, optional): The group instance return by new_group or None for global default group. Default: None.
+        use_calc_stream (bool, optional): Whether to use calculate stream or communication stream. Default: True.
     Returns:
         None.
     Examples:
         .. code-block:: python
 
+            # required: distributed
             import paddle
-            #from paddle.distributed import init_parallel_env
-            #init_parallel_env()
-            #if paddle.distributed.ParallelEnv().rank == 0:
-            #    data = paddle.to_tensor([7, 8, 9])
-            #    paddle.distributed.send(data, dst=1)
-            #else:
-            #    data = paddle.to_tensor([1,2,3])
-            #    paddle.distributed.recv(data, src=0)
-            #out = data.numpy()
+            from paddle.distributed import init_parallel_env
+            init_parallel_env()
+            if paddle.distributed.ParallelEnv().rank == 0:
+                data = paddle.to_tensor([7, 8, 9])
+                paddle.distributed.send(data, dst=1)
+            else:
+                data = paddle.to_tensor([1,2,3])
+                paddle.distributed.recv(data, src=0)
+            out = data.numpy()
     """
     if group is not None and not group.is_member():
         return
@@ -1307,23 +1308,24 @@ def recv(tensor, src=0, group=None, use_calc_stream=True):
         tensor (Tensor): The Tensor to receive. Its data type
             should be float16, float32, float64, int32 or int64.
         src (int): The source rank id.
-        group (Group): The group instance return by new_group or None for global default group.
-        use_calc_stream (bool): Whether to use calculate stream or communication stream.
+        group (Group, optional): The group instance return by new_group or None for global default group. Default: None.
+        use_calc_stream (bool, optional): Whether to use calculate stream or communication stream. Default: True.
     Returns:
         None.
     Examples:
         .. code-block:: python
 
+            # required: distributed
             import paddle
-            #from paddle.distributed import init_parallel_env
-            #init_parallel_env()
-            #if paddle.distributed.ParallelEnv().rank == 0:
-            #    data = paddle.to_tensor([7, 8, 9])
-            #    paddle.distributed.send(data, dst=1)
-            #else:
-            #    data = paddle.to_tensor([1,2,3])
-            #    paddle.distributed.recv(data, src=0)
-            #out = data.numpy()
+            from paddle.distributed import init_parallel_env
+            init_parallel_env()
+            if paddle.distributed.ParallelEnv().rank == 0:
+                data = paddle.to_tensor([7, 8, 9])
+                paddle.distributed.send(data, dst=1)
+            else:
+                data = paddle.to_tensor([1,2,3])
+                paddle.distributed.recv(data, src=0)
+            out = data.numpy()
     """
     if group is not None and not group.is_member():
         return
--
GitLab
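
For reference, the docstring example this patch un-comments is a complete two-rank point-to-point exchange. The sketch below shows the same pattern as a standalone script, assuming two worker processes started with paddle.distributed.launch; the script name send_recv_demo.py and the --gpus value are illustrative, not part of the patch:

    # send_recv_demo.py -- illustrative name; run two workers, e.g.:
    #   python -m paddle.distributed.launch --gpus "0,1" send_recv_demo.py
    import paddle
    from paddle.distributed import init_parallel_env

    init_parallel_env()  # create the global default process group

    if paddle.distributed.ParallelEnv().rank == 0:
        data = paddle.to_tensor([7, 8, 9])
        paddle.distributed.send(data, dst=1)   # point-to-point send to rank 1
    else:
        data = paddle.to_tensor([1, 2, 3])
        paddle.distributed.recv(data, src=0)   # fills data in place from rank 0

    print(data.numpy())  # both ranks now hold [7 8 9]

The defaults documented by the patch apply here: group=None targets the global group created by init_parallel_env, and use_calc_stream=True runs the transfer on the calculation stream, so the numpy() call that follows observes the received values.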