diff --git a/python/paddle/distributed/communication/all_gather.py b/python/paddle/distributed/communication/all_gather.py
index 47844cebdf02e6e85a13e9c941d6ce49e5da3562..90ff8ca14f92f51d4522bc702aff2cddd7e1bf10 100644
--- a/python/paddle/distributed/communication/all_gather.py
+++ b/python/paddle/distributed/communication/all_gather.py
@@ -51,19 +51,19 @@
     Examples:
         .. code-block:: python
 
-            # required: distributed
-            import paddle
-            import paddle.distributed as dist
-
-            dist.init_parallel_env()
-            tensor_list = []
-            if dist.get_rank() == 0:
-                data = paddle.to_tensor([[4, 5, 6], [4, 5, 6]])
-            else:
-                data = paddle.to_tensor([[1, 2, 3], [1, 2, 3]])
-            dist.all_gather(tensor_list, data)
-            print(tensor_list)
-            # [[[4, 5, 6], [4, 5, 6]], [[1, 2, 3], [1, 2, 3]]] (2 GPUs)
+            >>> # doctest: +REQUIRES(env: DISTRIBUTED)
+            >>> import paddle
+            >>> import paddle.distributed as dist
+
+            >>> dist.init_parallel_env()
+            >>> tensor_list = []
+            >>> if dist.get_rank() == 0:
+            ...     data = paddle.to_tensor([[4, 5, 6], [4, 5, 6]])
+            ... else:
+            ...     data = paddle.to_tensor([[1, 2, 3], [1, 2, 3]])
+            >>> dist.all_gather(tensor_list, data)
+            >>> print(tensor_list)
+            >>> # [[[4, 5, 6], [4, 5, 6]], [[1, 2, 3], [1, 2, 3]]] (2 GPUs)
     """
     return stream.all_gather(tensor_list, tensor, group, sync_op)
 
@@ -87,19 +87,19 @@
     Examples:
         .. code-block:: python
 
-            # required: distributed
-            import paddle
-            import paddle.distributed as dist
-
-            dist.init_parallel_env()
-            object_list = []
-            if dist.get_rank() == 0:
-                obj = {"foo": [1, 2, 3]}
-            else:
-                obj = {"bar": [4, 5, 6]}
-            dist.all_gather_object(object_list, obj)
-            print(object_list)
-            # [{'foo': [1, 2, 3]}, {'bar': [4, 5, 6]}] (2 GPUs)
+            >>> # doctest: +REQUIRES(env: DISTRIBUTED)
+            >>> import paddle
+            >>> import paddle.distributed as dist
+
+            >>> dist.init_parallel_env()
+            >>> object_list = []
+            >>> if dist.get_rank() == 0:
+            ...     obj = {"foo": [1, 2, 3]}
+            ... else:
+            ...     obj = {"bar": [4, 5, 6]}
+            >>> dist.all_gather_object(object_list, obj)
+            >>> print(object_list)
+            >>> # [{'foo': [1, 2, 3]}, {'bar': [4, 5, 6]}] (2 GPUs)
     """
     assert (
         framework.in_dynamic_mode()
diff --git a/python/paddle/distributed/communication/stream/all_gather.py b/python/paddle/distributed/communication/stream/all_gather.py
index 83cbe7ac1fcc139c45f5753c5e05a95d4c164c7b..fc814956be9983085ee9c7f1ce171c9c15d3f1b5 100644
--- a/python/paddle/distributed/communication/stream/all_gather.py
+++ b/python/paddle/distributed/communication/stream/all_gather.py
@@ -145,21 +145,21 @@
     Examples:
         .. code-block:: python
 
-            # required: distributed
-            import paddle
-            import paddle.distributed as dist
-
-            dist.init_parallel_env()
-            local_rank = dist.get_rank()
-            tensor_list = []
-            if local_rank == 0:
-                data = paddle.to_tensor([[4, 5, 6], [4, 5, 6]])
-            else:
-                data = paddle.to_tensor([[1, 2, 3], [1, 2, 3]])
-            task = dist.stream.all_gather(tensor_list, data, sync_op=False)
-            task.wait()
-            print(tensor_list)
-            # [[[4, 5, 6], [4, 5, 6]], [[1, 2, 3], [1, 2, 3]]] (2 GPUs)
+            >>> # doctest: +REQUIRES(env: DISTRIBUTED)
+            >>> import paddle
+            >>> import paddle.distributed as dist
+
+            >>> dist.init_parallel_env()
+            >>> local_rank = dist.get_rank()
+            >>> tensor_list = []
+            >>> if local_rank == 0:
+            ...     data = paddle.to_tensor([[4, 5, 6], [4, 5, 6]])
+            ... else:
+            ...     data = paddle.to_tensor([[1, 2, 3], [1, 2, 3]])
+            >>> task = dist.stream.all_gather(tensor_list, data, sync_op=False)
+            >>> task.wait()
+            >>> print(tensor_list)
+            >>> # [[[4, 5, 6], [4, 5, 6]], [[1, 2, 3], [1, 2, 3]]] (2 GPUs)
     """
     if group is not None and not group.is_member():
         raise RuntimeError(
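
Note: the converted examples are gated by the DISTRIBUTED doctest requirement, so they only produce the gathered results shown in the comments when launched across multiple processes. A minimal sketch of running the all_gather example on 2 GPUs, assuming a hypothetical script name all_gather_demo.py (not part of this patch):

    # all_gather_demo.py -- standalone version of the docstring example
    import paddle
    import paddle.distributed as dist

    dist.init_parallel_env()
    tensor_list = []
    if dist.get_rank() == 0:
        data = paddle.to_tensor([[4, 5, 6], [4, 5, 6]])
    else:
        data = paddle.to_tensor([[1, 2, 3], [1, 2, 3]])
    dist.all_gather(tensor_list, data)  # every rank receives the tensors from all ranks
    print(tensor_list)

launched with:

    python -m paddle.distributed.launch --gpus=0,1 all_gather_demo.py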