Unverified Commit e4f94153 authored by lilong12, committed by GitHub

update doc, test=document_fix (#28498)

Parent 6d8d3d4c
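Note: the only change in every hunk below is dropping the now-redundant paddle.disable_static() line from the docstring examples; since Paddle 2.0 the framework starts in dynamic graph (imperative) mode by default, so the examples execute eagerly without it. A minimal sketch of what the updated examples rely on (assuming Paddle 2.0 or later is installed):

    import paddle

    # Paddle 2.0+ starts in dynamic graph (imperative) mode, so ops run
    # eagerly without a prior paddle.disable_static() call.
    assert paddle.in_dynamic_mode()

    x = paddle.to_tensor([1, 2, 3], dtype='int32')
    print(x.numpy())  # [1 2 3]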
@@ -107,7 +107,6 @@ def broadcast(tensor, src, group=0):
 import paddle
 from paddle.distributed import init_parallel_env
-paddle.disable_static()
 paddle.set_device('gpu:%d'%paddle.distributed.ParallelEnv().dev_id)
 init_parallel_env()
 if paddle.distributed.ParallelEnv().local_rank == 0:
@@ -165,7 +164,6 @@ def all_reduce(tensor, op=ReduceOp.SUM, group=0):
 from paddle.distributed import ReduceOp
 from paddle.distributed import init_parallel_env
-paddle.disable_static()
 paddle.set_device('gpu:%d'%paddle.distributed.ParallelEnv().dev_id)
 init_parallel_env()
 if paddle.distributed.ParallelEnv().local_rank == 0:
@@ -240,7 +238,6 @@ def reduce(tensor, dst, op=ReduceOp.SUM, group=0):
 import paddle
 from paddle.distributed import init_parallel_env
-paddle.disable_static()
 paddle.set_device('gpu:%d'%paddle.distributed.ParallelEnv().dev_id)
 init_parallel_env()
 if paddle.distributed.ParallelEnv().local_rank == 0:
@@ -323,7 +320,6 @@ def all_gather(tensor_list, tensor, group=0):
 import paddle
 from paddle.distributed import init_parallel_env
-paddle.disable_static()
 paddle.set_device('gpu:%d'%paddle.distributed.ParallelEnv().dev_id)
 init_parallel_env()
 tensor_list = []
@@ -397,7 +393,6 @@ def scatter(tensor, tensor_list=None, src=0, group=0):
 import paddle
 from paddle.distributed import init_parallel_env
-paddle.disable_static()
 paddle.set_device('gpu:%d'%paddle.distributed.ParallelEnv().dev_id)
 init_parallel_env()
 if paddle.distributed.ParallelEnv().local_rank == 0:
@@ -463,7 +458,6 @@ def barrier(group=0):
 import paddle
 from paddle.distributed import init_parallel_env
-paddle.disable_static()
 paddle.set_device('gpu:%d'%paddle.distributed.ParallelEnv().dev_id)
 init_parallel_env()
 paddle.distributed.barrier()
...
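For context, the collective examples above are meant to run one process per GPU. A minimal sketch of the updated broadcast example as a standalone script (tensor values are illustrative; assume it is saved as demo.py and launched on a multi-GPU machine with: python -m paddle.distributed.launch demo.py):

    # demo.py
    import paddle
    from paddle.distributed import init_parallel_env

    # Bind each process to its own GPU, then set up the communicator.
    paddle.set_device('gpu:%d' % paddle.distributed.ParallelEnv().dev_id)
    init_parallel_env()

    if paddle.distributed.ParallelEnv().local_rank == 0:
        data = paddle.to_tensor([[4, 5, 6], [4, 5, 6]])
    else:
        data = paddle.to_tensor([[1, 2, 3], [1, 2, 3]])

    # Rank 1's tensor overwrites data on every participating process.
    paddle.distributed.broadcast(data, src=1)
    print(data.numpy())  # every rank prints [[1 2 3] [1 2 3]]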
@@ -1098,7 +1098,6 @@ def tile(x, repeat_times, name=None):
 import paddle
-paddle.disable_static()
 data = paddle.to_tensor([1, 2, 3], dtype='int32')
 out = paddle.tile(data, repeat_times=[2, 1])
 np_out = out.numpy()
@@ -1193,8 +1192,6 @@ def expand_as(x, y, name=None):
 import paddle
-paddle.disable_static()
 data_x = paddle.to_tensor([1, 2, 3], 'int32')
 data_y = paddle.to_tensor([[1, 2, 3], [4, 5, 6]], 'int32')
 out = paddle.expand_as(data_x, data_y)
...
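Likewise, the tensor-manipulation examples above now run as-is on a single card in the default dynamic mode. A minimal sketch (the output comments follow the documented broadcast semantics of tile and expand_as):

    import paddle

    # tile with repeat_times=[2, 1] promotes the 1-D input to rank 2
    # and repeats it twice along the leading axis.
    data = paddle.to_tensor([1, 2, 3], dtype='int32')
    out = paddle.tile(data, repeat_times=[2, 1])
    print(out.numpy())  # [[1 2 3] [1 2 3]]

    # expand_as broadcasts data_x to the shape of data_y.
    data_x = paddle.to_tensor([1, 2, 3], 'int32')
    data_y = paddle.to_tensor([[1, 2, 3], [4, 5, 6]], 'int32')
    out = paddle.expand_as(data_x, data_y)
    print(out.numpy())  # [[1 2 3] [1 2 3]]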