From d95c24a06ab8d99011b14f40a7c383528dad5db4 Mon Sep 17 00:00:00 2001
From: lilong12
Date: Wed, 2 Sep 2020 10:56:50 +0800
Subject: [PATCH] update docs (#2523)

---
 doc/fluid/api_cn/distributed_cn/all_gather_cn.rst | 11 ++++++-----
 doc/fluid/api_cn/distributed_cn/all_reduce_cn.rst | 10 ++++++----
 doc/fluid/api_cn/distributed_cn/barrier_cn.rst    |  6 +++---
 doc/fluid/api_cn/distributed_cn/broadcast_cn.rst  |  9 +++++----
 doc/fluid/api_cn/distributed_cn/reduce_cn.rst     |  9 +++++----
 doc/fluid/api_cn/distributed_cn/scatter_cn.rst    | 11 ++++++-----
 6 files changed, 31 insertions(+), 25 deletions(-)

diff --git a/doc/fluid/api_cn/distributed_cn/all_gather_cn.rst b/doc/fluid/api_cn/distributed_cn/all_gather_cn.rst
index 543b479ee..6e2032881 100644
--- a/doc/fluid/api_cn/distributed_cn/all_gather_cn.rst
+++ b/doc/fluid/api_cn/distributed_cn/all_gather_cn.rst
@@ -22,14 +22,15 @@ all_gather
 :::::::::
 .. code-block:: python
 
+    import numpy as np
     import paddle
-    import paddle.prepare_context as prepare_context
+    from paddle.distributed import init_parallel_env
     paddle.disable_static()
-    paddle.set_device('gpu:%d'%paddle.ParallelEnv().dev_id)
-    prepare_context()
+    paddle.set_device('gpu:%d'%paddle.distributed.ParallelEnv().dev_id)
+    init_parallel_env()
     tensor_list = []
-    if paddle.ParallelEnv().local_rank == 0:
+    if paddle.distributed.ParallelEnv().local_rank == 0:
         np_data1 = np.array([[4, 5, 6], [4, 5, 6]])
         np_data2 = np.array([[4, 5, 6], [4, 5, 6]])
         data1 = paddle.to_tensor(np_data1)
@@ -40,4 +41,4 @@ all_gather
         np_data2 = np.array([[1, 2, 3], [1, 2, 3]])
         data1 = paddle.to_tensor(np_data1)
         data2 = paddle.to_tensor(np_data2)
-        out = paddle.distributed.all_gather(tensor_list, data2)
+        paddle.distributed.all_gather(tensor_list, data2)
diff --git a/doc/fluid/api_cn/distributed_cn/all_reduce_cn.rst b/doc/fluid/api_cn/distributed_cn/all_reduce_cn.rst
index de20db9b3..0ee8efd0c 100644
--- a/doc/fluid/api_cn/distributed_cn/all_reduce_cn.rst
+++ b/doc/fluid/api_cn/distributed_cn/all_reduce_cn.rst
@@ -22,17 +22,19 @@ all_reduce
 :::::::::
 .. code-block:: python
 
+    import numpy as np
     import paddle
     from paddle.distributed import ReduceOp
-    import paddle.prepare_context as prepare_context
+    from paddle.distributed import init_parallel_env
     paddle.disable_static()
-    paddle.set_device('gpu:%d'%paddle.ParallelEnv().dev_id)
-    prepare_context()
-    if paddle.ParallelEnv().local_rank == 0:
+    paddle.set_device('gpu:%d'%paddle.distributed.ParallelEnv().dev_id)
+    init_parallel_env()
+    if paddle.distributed.ParallelEnv().local_rank == 0:
         np_data = np.array([[4, 5, 6], [4, 5, 6]])
     else:
         np_data = np.array([[1, 2, 3], [1, 2, 3]])
     data = paddle.to_tensor(np_data)
     paddle.distributed.all_reduce(data)
     out = data.numpy()
+    # [[5, 7, 9], [5, 7, 9]]
 
diff --git a/doc/fluid/api_cn/distributed_cn/barrier_cn.rst b/doc/fluid/api_cn/distributed_cn/barrier_cn.rst
index 60b8e9cef..fb63526cf 100644
--- a/doc/fluid/api_cn/distributed_cn/barrier_cn.rst
+++ b/doc/fluid/api_cn/distributed_cn/barrier_cn.rst
@@ -21,9 +21,9 @@ barrier
 .. code-block:: python
 
     import paddle
-    import paddle.prepare_context as prepare_context
+    from paddle.distributed import init_parallel_env
     paddle.disable_static()
-    paddle.set_device('gpu:%d'%paddle.ParallelEnv().dev_id)
-    prepare_context()
+    paddle.set_device('gpu:%d'%paddle.distributed.ParallelEnv().dev_id)
+    init_parallel_env()
     paddle.distributed.barrier()
 
diff --git a/doc/fluid/api_cn/distributed_cn/broadcast_cn.rst b/doc/fluid/api_cn/distributed_cn/broadcast_cn.rst
index b6776c15a..3022faaa8 100644
--- a/doc/fluid/api_cn/distributed_cn/broadcast_cn.rst
+++ b/doc/fluid/api_cn/distributed_cn/broadcast_cn.rst
@@ -22,13 +22,14 @@ broadcast
 :::::::::
 .. code-block:: python
 
+    import numpy as np
     import paddle
-    import paddle.prepare_context as prepare_context
+    from paddle.distributed import init_parallel_env
     paddle.disable_static()
-    paddle.set_device('gpu:%d'%paddle.ParallelEnv().dev_id)
-    prepare_context()
-    if paddle.ParallelEnv().local_rank == 0:
+    paddle.set_device('gpu:%d'%paddle.distributed.ParallelEnv().dev_id)
+    init_parallel_env()
+    if paddle.distributed.ParallelEnv().local_rank == 0:
         np_data = np.array([[4, 5, 6], [4, 5, 6]])
     else:
         np_data = np.array([[1, 2, 3], [1, 2, 3]])
     data = paddle.to_tensor(np_data)
diff --git a/doc/fluid/api_cn/distributed_cn/reduce_cn.rst b/doc/fluid/api_cn/distributed_cn/reduce_cn.rst
index 5dfaefb59..0f223f81b 100644
--- a/doc/fluid/api_cn/distributed_cn/reduce_cn.rst
+++ b/doc/fluid/api_cn/distributed_cn/reduce_cn.rst
@@ -23,13 +23,14 @@ reduce
 :::::::::
 .. code-block:: python
 
+    import numpy as np
     import paddle
-    import paddle.prepare_context as prepare_context
+    from paddle.distributed import init_parallel_env
     paddle.disable_static()
-    paddle.set_device('gpu:%d'%paddle.ParallelEnv().dev_id)
-    prepare_context()
-    if paddle.ParallelEnv().local_rank == 0:
+    paddle.set_device('gpu:%d'%paddle.distributed.ParallelEnv().dev_id)
+    init_parallel_env()
+    if paddle.distributed.ParallelEnv().local_rank == 0:
         np_data = np.array([[4, 5, 6], [4, 5, 6]])
     else:
         np_data = np.array([[1, 2, 3], [1, 2, 3]])
     data = paddle.to_tensor(np_data)
diff --git a/doc/fluid/api_cn/distributed_cn/scatter_cn.rst b/doc/fluid/api_cn/distributed_cn/scatter_cn.rst
index 97b347f44..1afea49e1 100644
--- a/doc/fluid/api_cn/distributed_cn/scatter_cn.rst
+++ b/doc/fluid/api_cn/distributed_cn/scatter_cn.rst
@@ -23,13 +23,14 @@ scatter
 :::::::::
 .. code-block:: python
 
+    import numpy as np
     import paddle
-    import paddle.prepare_context as prepare_context
+    from paddle.distributed import init_parallel_env
     paddle.disable_static()
-    paddle.set_device('gpu:%d'%paddle.ParallelEnv().dev_id)
-    prepare_context()
-    if paddle.ParallelEnv().local_rank == 0:
+    paddle.set_device('gpu:%d'%paddle.distributed.ParallelEnv().dev_id)
+    init_parallel_env()
+    if paddle.distributed.ParallelEnv().local_rank == 0:
         np_data1 = np.array([7, 8, 9])
         np_data2 = np.array([10, 11, 12])
     else:
         np_data1 = np.array([1, 2, 3])
@@ -37,7 +38,7 @@ scatter
         np_data2 = np.array([4, 5, 6])
     data1 = paddle.to_tensor(np_data1)
     data2 = paddle.to_tensor(np_data2)
-    if paddle.ParallelEnv().local_rank == 0:
+    if paddle.distributed.ParallelEnv().local_rank == 0:
         paddle.distributed.scatter(data1, src=1)
     else:
         paddle.distributed.scatter(data1, tensor_list=[data1, data2], src=1)
-- 
GitLab
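
Note on running the updated examples: each snippet in this patch is a per-process program that only performs real communication when several copies run at once, so it has to be started through a multi-process launcher rather than invoked directly. Below is a minimal sketch of one way to drive the patched all_reduce example with ``paddle.distributed.spawn``, from the same 2.0-generation API as ``init_parallel_env``; the ``train`` function name and ``nprocs=2`` are illustrative assumptions, not part of this patch.

.. code-block:: python

    import numpy as np
    import paddle
    from paddle.distributed import init_parallel_env

    def train():
        # Runs once in every spawned worker; dev_id picks that worker's GPU.
        paddle.set_device('gpu:%d' % paddle.distributed.ParallelEnv().dev_id)
        init_parallel_env()
        # Same pattern as the all_reduce example patched above.
        if paddle.distributed.ParallelEnv().local_rank == 0:
            np_data = np.array([[4, 5, 6], [4, 5, 6]])
        else:
            np_data = np.array([[1, 2, 3], [1, 2, 3]])
        data = paddle.to_tensor(np_data)
        paddle.distributed.all_reduce(data)
        print(data.numpy())  # [[5, 7, 9], [5, 7, 9]] on every rank

    if __name__ == '__main__':
        # nprocs=2 assumes two visible GPUs; spawn starts one worker per device.
        paddle.distributed.spawn(train, nprocs=2)

Alternatively, the same ``train`` body can live in a plain script launched with ``python -m paddle.distributed.launch``, which sets the environment variables that ``init_parallel_env`` reads.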