Unverified commit 030b298e authored by lilong12, committed by GitHub

fix sample codes in collective.py (#26787)

* fix sample codes, test=develop
Parent 435ab2aa
@@ -73,13 +73,14 @@ def broadcast(tensor, src, group=0):
     Examples:
         .. code-block:: python

+            import numpy as np
             import paddle
-            import paddle.prepare_context as prepare_context
+            from paddle.distributed import init_parallel_env

             paddle.disable_static()
-            paddle.set_device('gpu:%d'%paddle.ParallelEnv().dev_id)
-            prepare_context()
-            if paddle.ParallelEnv().local_rank == 0:
+            paddle.set_device('gpu:%d'%paddle.distributed.ParallelEnv().dev_id)
+            init_parallel_env()
+            if paddle.distributed.ParallelEnv().local_rank == 0:
                 np_data = np.array([[4, 5, 6], [4, 5, 6]])
             else:
                 np_data = np.array([[1, 2, 3], [1, 2, 3]])
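The hunk above cuts off before the collective call itself. For reference, a complete, runnable sketch of the updated broadcast example, assuming two processes started by paddle.distributed.launch (one per GPU); the last three lines are not part of this hunk and follow the paddle.distributed.broadcast API rather than the diff:

    import numpy as np
    import paddle
    from paddle.distributed import init_parallel_env

    # each process binds to its own GPU, then joins the default group
    paddle.disable_static()
    paddle.set_device('gpu:%d' % paddle.distributed.ParallelEnv().dev_id)
    init_parallel_env()

    if paddle.distributed.ParallelEnv().local_rank == 0:
        np_data = np.array([[4, 5, 6], [4, 5, 6]])
    else:
        np_data = np.array([[1, 2, 3], [1, 2, 3]])
    data = paddle.to_tensor(np_data)

    # broadcast rank 1's tensor to every rank in the default group, in place
    paddle.distributed.broadcast(data, 1)
    out = data.numpy()  # [[1, 2, 3], [1, 2, 3]] on both ranks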
@@ -129,14 +130,15 @@ def all_reduce(tensor, op=ReduceOp.SUM, group=0):
     Examples:
         .. code-block:: python

+            import numpy as np
             import paddle
             from paddle.distributed import ReduceOp
-            import paddle.prepare_context as prepare_context
+            from paddle.distributed import init_parallel_env

             paddle.disable_static()
-            paddle.set_device('gpu:%d'%paddle.ParallelEnv().dev_id)
-            prepare_context()
-            if paddle.ParallelEnv().local_rank == 0:
+            paddle.set_device('gpu:%d'%paddle.distributed.ParallelEnv().dev_id)
+            init_parallel_env()
+            if paddle.distributed.ParallelEnv().local_rank == 0:
                 np_data = np.array([[4, 5, 6], [4, 5, 6]])
             else:
                 np_data = np.array([[1, 2, 3], [1, 2, 3]])
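Likewise, the all_reduce example in full might read as sketched below; the lines after the else branch are not shown in the hunk and are reconstructed from the paddle.distributed.all_reduce API (ReduceOp.SUM is the default op):

    import numpy as np
    import paddle
    from paddle.distributed import ReduceOp
    from paddle.distributed import init_parallel_env

    paddle.disable_static()
    paddle.set_device('gpu:%d' % paddle.distributed.ParallelEnv().dev_id)
    init_parallel_env()

    if paddle.distributed.ParallelEnv().local_rank == 0:
        np_data = np.array([[4, 5, 6], [4, 5, 6]])
    else:
        np_data = np.array([[1, 2, 3], [1, 2, 3]])
    data = paddle.to_tensor(np_data)

    # element-wise sum across ranks, written back into data on every rank
    paddle.distributed.all_reduce(data, op=ReduceOp.SUM)
    out = data.numpy()  # [[5, 7, 9], [5, 7, 9]] on both ranks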
@@ -204,13 +206,14 @@ def reduce(tensor, dst, op=ReduceOp.SUM, group=0):
     Examples:
         .. code-block:: python

+            import numpy as np
             import paddle
-            import paddle.prepare_context as prepare_context
+            from paddle.distributed import init_parallel_env

             paddle.disable_static()
-            paddle.set_device('gpu:%d'%paddle.ParallelEnv().dev_id)
-            prepare_context()
-            if paddle.ParallelEnv().local_rank == 0:
+            paddle.set_device('gpu:%d'%paddle.distributed.ParallelEnv().dev_id)
+            init_parallel_env()
+            if paddle.distributed.ParallelEnv().local_rank == 0:
                 np_data = np.array([[4, 5, 6], [4, 5, 6]])
             else:
                 np_data = np.array([[1, 2, 3], [1, 2, 3]])
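The reduce example differs from all_reduce only in that the result lands on a single destination rank. A runnable sketch of the updated example, with the truncated tail reconstructed from the paddle.distributed.reduce API and dst set to 0:

    import numpy as np
    import paddle
    from paddle.distributed import init_parallel_env

    paddle.disable_static()
    paddle.set_device('gpu:%d' % paddle.distributed.ParallelEnv().dev_id)
    init_parallel_env()

    if paddle.distributed.ParallelEnv().local_rank == 0:
        np_data = np.array([[4, 5, 6], [4, 5, 6]])
    else:
        np_data = np.array([[1, 2, 3], [1, 2, 3]])
    data = paddle.to_tensor(np_data)

    # element-wise sum across ranks; only rank 0 (dst) is guaranteed the result
    paddle.distributed.reduce(data, 0)
    out = data.numpy()  # [[5, 7, 9], [5, 7, 9]] on rank 0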
@@ -286,14 +289,15 @@ def all_gather(tensor_list, tensor, group=0):
     Examples:
         .. code-block:: python

+            import numpy as np
             import paddle
-            import paddle.prepare_context as prepare_context
+            from paddle.distributed import init_parallel_env

             paddle.disable_static()
-            paddle.set_device('gpu:%d'%paddle.ParallelEnv().dev_id)
-            prepare_context()
+            paddle.set_device('gpu:%d'%paddle.distributed.ParallelEnv().dev_id)
+            init_parallel_env()
             tensor_list = []
-            if paddle.ParallelEnv().local_rank == 0:
+            if paddle.distributed.ParallelEnv().local_rank == 0:
                 np_data1 = np.array([[4, 5, 6], [4, 5, 6]])
                 np_data2 = np.array([[4, 5, 6], [4, 5, 6]])
                 data1 = paddle.to_tensor(np_data1)
@@ -304,7 +308,7 @@ def all_gather(tensor_list, tensor, group=0):
                 np_data2 = np.array([[1, 2, 3], [1, 2, 3]])
                 data1 = paddle.to_tensor(np_data1)
                 data2 = paddle.to_tensor(np_data2)
-                out = paddle.distributed.all_gather(tensor_list, data2)
+                paddle.distributed.all_gather(tensor_list, data2)
     """
     op_type = 'c_allgather'
     helper = LayerHelper(op_type, **locals())
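The two all_gather hunks leave a few example lines elided between them. Stitched together, the updated example might read roughly as follows; the rank-0 branch's all_gather call and the start of the else branch are reconstructed, not shown in the diff:

    import numpy as np
    import paddle
    from paddle.distributed import init_parallel_env

    paddle.disable_static()
    paddle.set_device('gpu:%d' % paddle.distributed.ParallelEnv().dev_id)
    init_parallel_env()

    tensor_list = []
    if paddle.distributed.ParallelEnv().local_rank == 0:
        np_data1 = np.array([[4, 5, 6], [4, 5, 6]])
        np_data2 = np.array([[4, 5, 6], [4, 5, 6]])
        data1 = paddle.to_tensor(np_data1)
        data2 = paddle.to_tensor(np_data2)
        # rank 0 contributes data1 to the gather
        paddle.distributed.all_gather(tensor_list, data1)
    else:
        np_data1 = np.array([[1, 2, 3], [1, 2, 3]])
        np_data2 = np.array([[1, 2, 3], [1, 2, 3]])
        data1 = paddle.to_tensor(np_data1)
        data2 = paddle.to_tensor(np_data2)
        # the other rank contributes data2
        paddle.distributed.all_gather(tensor_list, data2)
    # tensor_list now holds one tensor per rank, in rank order, on every rank

The change in the second hunk drops the unused return value: all_gather fills tensor_list in place, so assigning its result to out was misleading.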
@@ -359,13 +363,14 @@ def scatter(tensor, tensor_list=None, src=0, group=0):
     Examples:
         .. code-block:: python

+            import numpy as np
             import paddle
-            import paddle.prepare_context as prepare_context
+            from paddle.distributed import init_parallel_env

             paddle.disable_static()
-            paddle.set_device('gpu:%d'%paddle.ParallelEnv().dev_id)
-            prepare_context()
-            if paddle.ParallelEnv().local_rank == 0:
+            paddle.set_device('gpu:%d'%paddle.distributed.ParallelEnv().dev_id)
+            init_parallel_env()
+            if paddle.distributed.ParallelEnv().local_rank == 0:
                 np_data1 = np.array([7, 8, 9])
                 np_data2 = np.array([10, 11, 12])
             else:
@@ -373,7 +378,7 @@ def scatter(tensor, tensor_list=None, src=0, group=0):
                 np_data2 = np.array([4, 5, 6])
             data1 = paddle.to_tensor(np_data1)
             data2 = paddle.to_tensor(np_data2)
-            if paddle.ParallelEnv().local_rank == 0:
+            if paddle.distributed.ParallelEnv().local_rank == 0:
                 paddle.distributed.scatter(data1, src=1)
             else:
                 paddle.distributed.scatter(data1, tensor_list=[data1, data2], src=1)
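Combining the two scatter hunks, a runnable sketch of the updated example; the else-branch np_data1 line falls between the hunks and is reconstructed here. Rank 1 is the source and supplies tensor_list, while the other rank only receives:

    import numpy as np
    import paddle
    from paddle.distributed import init_parallel_env

    paddle.disable_static()
    paddle.set_device('gpu:%d' % paddle.distributed.ParallelEnv().dev_id)
    init_parallel_env()

    if paddle.distributed.ParallelEnv().local_rank == 0:
        np_data1 = np.array([7, 8, 9])
        np_data2 = np.array([10, 11, 12])
    else:
        np_data1 = np.array([1, 2, 3])
        np_data2 = np.array([4, 5, 6])
    data1 = paddle.to_tensor(np_data1)
    data2 = paddle.to_tensor(np_data2)

    if paddle.distributed.ParallelEnv().local_rank == 0:
        # non-source ranks pass no tensor_list; they just receive into data1
        paddle.distributed.scatter(data1, src=1)
    else:
        # the source rank (1) provides one tensor per rank to scatter
        paddle.distributed.scatter(data1, tensor_list=[data1, data2], src=1)
    out = data1.numpy()  # rank 0 gets [1, 2, 3], rank 1 gets [4, 5, 6]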
@@ -426,11 +431,11 @@ def barrier(group=0):
         .. code-block:: python

             import paddle
-            import paddle.prepare_context as prepare_context
+            from paddle.distributed import init_parallel_env

             paddle.disable_static()
-            paddle.set_device('gpu:%d'%paddle.ParallelEnv().dev_id)
-            prepare_context()
+            paddle.set_device('gpu:%d'%paddle.distributed.ParallelEnv().dev_id)
+            init_parallel_env()
             paddle.distributed.barrier()
     """
     op_type = 'barrier'
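The barrier example is already complete in the hunk above. For completeness, a minimal sketch of a whole script built around it, assuming it is started with one process per GPU via paddle.distributed.launch (exact launcher flags depend on the Paddle version):

    # e.g. run as a multi-process job:  python -m paddle.distributed.launch barrier_demo.py
    import paddle
    from paddle.distributed import init_parallel_env

    paddle.disable_static()
    paddle.set_device('gpu:%d' % paddle.distributed.ParallelEnv().dev_id)
    init_parallel_env()

    # every rank blocks here until all ranks in the default group have arrived
    paddle.distributed.barrier()
    print("rank %d passed the barrier" % paddle.distributed.ParallelEnv().local_rank)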