diff --git a/doc/fluid/api_cn/distributed_cn/all_gather_cn.rst b/doc/fluid/api_cn/distributed_cn/all_gather_cn.rst
index 543b479eeb656355cff3cd0af6f4d9847809704e..6e20328810accc515b95694ad47e3030d42ed7d3 100644
--- a/doc/fluid/api_cn/distributed_cn/all_gather_cn.rst
+++ b/doc/fluid/api_cn/distributed_cn/all_gather_cn.rst
@@ -22,14 +22,15 @@ all_gather
 :::::::::
 .. code-block:: python
 
+        import numpy as np
         import paddle
-        import paddle.prepare_context as prepare_context
+        from paddle.distributed import init_parallel_env
 
         paddle.disable_static()
-        paddle.set_device('gpu:%d'%paddle.ParallelEnv().dev_id)
-        prepare_context()
+        paddle.set_device('gpu:%d'%paddle.distributed.ParallelEnv().dev_id)
+        init_parallel_env()
         tensor_list = []
-        if paddle.ParallelEnv().local_rank == 0:
+        if paddle.distributed.ParallelEnv().local_rank == 0:
             np_data1 = np.array([[4, 5, 6], [4, 5, 6]])
             np_data2 = np.array([[4, 5, 6], [4, 5, 6]])
             data1 = paddle.to_tensor(np_data1)
@@ -40,4 +41,4 @@ all_gather
             np_data2 = np.array([[1, 2, 3], [1, 2, 3]])
             data1 = paddle.to_tensor(np_data1)
             data2 = paddle.to_tensor(np_data2)
-            out = paddle.distributed.all_gather(tensor_list, data2)
+            paddle.distributed.all_gather(tensor_list, data2)
diff --git a/doc/fluid/api_cn/distributed_cn/all_reduce_cn.rst b/doc/fluid/api_cn/distributed_cn/all_reduce_cn.rst
index de20db9b3cac3fedcdaa3975f26f36e3ac9c4694..0ee8efd0cbd115d6e6a6b14bbad7a03903de28fa 100644
--- a/doc/fluid/api_cn/distributed_cn/all_reduce_cn.rst
+++ b/doc/fluid/api_cn/distributed_cn/all_reduce_cn.rst
@@ -22,17 +22,19 @@ all_reduce
 :::::::::
 .. code-block:: python
 
+        import numpy as np
         import paddle
         from paddle.distributed import ReduceOp
-        import paddle.prepare_context as prepare_context
+        from paddle.distributed import init_parallel_env
 
         paddle.disable_static()
-        paddle.set_device('gpu:%d'%paddle.ParallelEnv().dev_id)
-        prepare_context()
-        if paddle.ParallelEnv().local_rank == 0:
+        paddle.set_device('gpu:%d'%paddle.distributed.ParallelEnv().dev_id)
+        init_parallel_env()
+        if paddle.distributed.ParallelEnv().local_rank == 0:
             np_data = np.array([[4, 5, 6], [4, 5, 6]])
         else:
             np_data = np.array([[1, 2, 3], [1, 2, 3]])
         data = paddle.to_tensor(np_data)
         paddle.distributed.all_reduce(data)
         out = data.numpy()
+        # [[5, 7, 9], [5, 7, 9]]
diff --git a/doc/fluid/api_cn/distributed_cn/barrier_cn.rst b/doc/fluid/api_cn/distributed_cn/barrier_cn.rst
index 60b8e9cefcb8420cee18b70ec854162445cef7e2..fb63526cfd0163cec91396144b2e0c3c0f9beace 100644
--- a/doc/fluid/api_cn/distributed_cn/barrier_cn.rst
+++ b/doc/fluid/api_cn/distributed_cn/barrier_cn.rst
@@ -21,9 +21,9 @@ barrier
 .. code-block:: python
 
         import paddle
-        import paddle.prepare_context as prepare_context
+        from paddle.distributed import init_parallel_env
 
         paddle.disable_static()
-        paddle.set_device('gpu:%d'%paddle.ParallelEnv().dev_id)
-        prepare_context()
+        paddle.set_device('gpu:%d'%paddle.distributed.ParallelEnv().dev_id)
+        init_parallel_env()
         paddle.distributed.barrier()
diff --git a/doc/fluid/api_cn/distributed_cn/broadcast_cn.rst b/doc/fluid/api_cn/distributed_cn/broadcast_cn.rst
index b6776c15aff19a601ac870bdb85fe38f2e76b1d6..3022faaa83a0a9c10ad6e4c25ba644805fd141ed 100644
--- a/doc/fluid/api_cn/distributed_cn/broadcast_cn.rst
+++ b/doc/fluid/api_cn/distributed_cn/broadcast_cn.rst
@@ -22,13 +22,14 @@ broadcast
 :::::::::
 .. code-block:: python
 
+        import numpy as np
         import paddle
-        import paddle.prepare_context as prepare_context
+        from paddle.distributed import init_parallel_env
 
         paddle.disable_static()
-        paddle.set_device('gpu:%d'%paddle.ParallelEnv().dev_id)
-        prepare_context()
-        if paddle.ParallelEnv().local_rank == 0:
+        paddle.set_device('gpu:%d'%paddle.distributed.ParallelEnv().dev_id)
+        init_parallel_env()
+        if paddle.distributed.ParallelEnv().local_rank == 0:
             np_data = np.array([[4, 5, 6], [4, 5, 6]])
         else:
             np_data = np.array([[1, 2, 3], [1, 2, 3]])
diff --git a/doc/fluid/api_cn/distributed_cn/reduce_cn.rst b/doc/fluid/api_cn/distributed_cn/reduce_cn.rst
index 5dfaefb597910de5e46addc154ee7bf15a170786..0f223f81b6ca0d7f714992df095039ebcab343a9 100644
--- a/doc/fluid/api_cn/distributed_cn/reduce_cn.rst
+++ b/doc/fluid/api_cn/distributed_cn/reduce_cn.rst
@@ -23,13 +23,14 @@ reduce
 :::::::::
 .. code-block:: python
 
+        import numpy as np
         import paddle
-        import paddle.prepare_context as prepare_context
+        from paddle.distributed import init_parallel_env
 
         paddle.disable_static()
-        paddle.set_device('gpu:%d'%paddle.ParallelEnv().dev_id)
-        prepare_context()
-        if paddle.ParallelEnv().local_rank == 0:
+        paddle.set_device('gpu:%d'%paddle.distributed.ParallelEnv().dev_id)
+        init_parallel_env()
+        if paddle.distributed.ParallelEnv().local_rank == 0:
             np_data = np.array([[4, 5, 6], [4, 5, 6]])
         else:
             np_data = np.array([[1, 2, 3], [1, 2, 3]])
diff --git a/doc/fluid/api_cn/distributed_cn/scatter_cn.rst b/doc/fluid/api_cn/distributed_cn/scatter_cn.rst
index 97b347f4464888c02e697600533e9720899ac7c3..1afea49e14bec02da69f125a525e4a24ad9d2a88 100644
--- a/doc/fluid/api_cn/distributed_cn/scatter_cn.rst
+++ b/doc/fluid/api_cn/distributed_cn/scatter_cn.rst
@@ -23,13 +23,14 @@ scatter
 :::::::::
 .. code-block:: python
 
+        import numpy as np
         import paddle
-        import paddle.prepare_context as prepare_context
+        from paddle.distributed import init_parallel_env
 
         paddle.disable_static()
-        paddle.set_device('gpu:%d'%paddle.ParallelEnv().dev_id)
-        prepare_context()
-        if paddle.ParallelEnv().local_rank == 0:
+        paddle.set_device('gpu:%d'%paddle.distributed.ParallelEnv().dev_id)
+        init_parallel_env()
+        if paddle.distributed.ParallelEnv().local_rank == 0:
             np_data1 = np.array([7, 8, 9])
             np_data2 = np.array([10, 11, 12])
         else:
@@ -37,7 +38,7 @@ scatter
             np_data2 = np.array([4, 5, 6])
         data1 = paddle.to_tensor(np_data1)
         data2 = paddle.to_tensor(np_data2)
-        if paddle.ParallelEnv().local_rank == 0:
+        if paddle.distributed.scatter(data1, src=1)
+        if paddle.distributed.ParallelEnv().local_rank == 0:
             paddle.distributed.scatter(data1, src=1)
         else:
             paddle.distributed.scatter(data1, tensor_list=[data1, data2], src=1)
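
Not part of the diff above: a minimal sketch of how the rewritten snippets are actually launched, since `init_parallel_env()` expects one process per GPU. The `train` entry-point name and the two-GPU `nprocs=2` setting are illustrative assumptions; the collective body mirrors the updated `all_reduce` example.

.. code-block:: python

    # Hedged sketch: assumes Paddle 2.0-era dynamic-graph APIs and >= 2 GPUs.
    import numpy as np
    import paddle
    from paddle.distributed import init_parallel_env

    def train():  # hypothetical entry point; one copy runs per process
        paddle.disable_static()
        paddle.set_device('gpu:%d' % paddle.distributed.ParallelEnv().dev_id)
        init_parallel_env()
        if paddle.distributed.ParallelEnv().local_rank == 0:
            np_data = np.array([[4, 5, 6], [4, 5, 6]])
        else:
            np_data = np.array([[1, 2, 3], [1, 2, 3]])
        data = paddle.to_tensor(np_data)
        paddle.distributed.all_reduce(data)  # defaults to SUM across ranks
        print(data.numpy())                  # [[5, 7, 9], [5, 7, 9]] on every rank

    if __name__ == '__main__':
        # Spawn one process per GPU; `python -m paddle.distributed.launch`
        # is the command-line alternative to spawning from Python.
        paddle.distributed.spawn(train, nprocs=2)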