Unverified commit f53e5a04, authored by 姜永久, committed by GitHub

rm multinode eager guard tests (#48766)

* rm multinode eager guard tests

* remove unwanted tests

* reset process_mpi test
Parent 7216d9bf
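The change is mechanical and repeats across every touched test file: the `_test_eager_guard` import is dropped and the `with _test_eager_guard():` wrapper around the test body is removed, de-indenting the body one level (the guard existed to force eager mode while it was still opt-in; with eager mode as the default it is redundant). A minimal before/after sketch of the pattern — `run_tests()` is a placeholder standing in for the real test entry points, not a name from this commit:

# Before: the body only runs under the temporary eager-mode guard.
from paddle.fluid.framework import _test_eager_guard

if __name__ == '__main__':
    with _test_eager_guard():
        run_tests()  # placeholder for the actual test entry point

# After: the guard and its import are removed; the body runs directly.
if __name__ == '__main__':
    run_tests()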
@@ -34,7 +34,6 @@ from paddle.distributed.fleet.meta_parallel.sharding.group_sharded_stage3 import
 from paddle.distributed.fleet.meta_parallel.sharding.group_sharded_utils import (
     GroupShardedScaler,
 )
-from paddle.fluid.framework import _test_eager_guard
 from paddle.nn import Linear
 
 epoch = 10
@@ -331,5 +330,4 @@ def test_stage2_stage3():
 
 
 if __name__ == '__main__':
-    with _test_eager_guard():
-        test_stage2_stage3()
+    test_stage2_stage3()
@@ -29,7 +29,6 @@ from paddle.distributed.fleet.meta_optimizers.dygraph_optimizer.sharding_optimiz
 from paddle.distributed.fleet.meta_parallel.sharding.sharding_stage2 import (
     ShardingStage2,
 )
-from paddle.fluid.framework import _test_eager_guard
 from paddle.nn import Linear
 
 seed = 2022
@@ -248,7 +247,5 @@ def test_dp_stage2():
 
 
 if __name__ == '__main__':
-    with _test_eager_guard():
-        pass
     fleet.init(is_collective=True, strategy=strategy)
     test_dp_stage2()
@@ -20,7 +20,6 @@ import numpy as np
 import paddle
 from paddle.fluid import core
 from paddle.fluid.dygraph.parallel import ParallelEnv
-from paddle.fluid.framework import _test_eager_guard
 
 
 class TestProcessGroupFp32(unittest.TestCase):
@@ -35,154 +34,151 @@ class TestProcessGroupFp32(unittest.TestCase):
         self.shape = (2, 10, 5)
 
     def test_create_process_group_gloo(self):
-        with _test_eager_guard():
-            nranks = ParallelEnv().nranks
-            rank = ParallelEnv().local_rank
-            is_master = True if rank == 0 else False
-            store = paddle.fluid.core.TCPStore(
-                "127.0.0.1", 6272, is_master, nranks, 30
-            )
-            pg = paddle.fluid.core.ProcessGroupGloo.create(store, rank, nranks)
-
-            # test allreduce sum
-            # rank 0
-            paddle.device.set_device('cpu')
-            x = np.random.random(self.shape).astype(self.dtype)
-            tensor_x = paddle.to_tensor(x)
-            # rank 1
-            y = np.random.random(self.shape).astype(self.dtype)
-            tensor_y = paddle.to_tensor(y)
-
-            sum_result = x + y
-            if rank == 0:
-                task = pg.allreduce(tensor_x)
-                task.wait()
-                np.testing.assert_equal(tensor_x, sum_result)
-            else:
-                task = pg.allreduce(tensor_y)
-                task.wait()
-                np.testing.assert_equal(tensor_y, sum_result)
-
-            print("test allreduce sum api ok")
-
-            # test allreduce max
-            # rank 0
-            x = np.random.random(self.shape).astype(self.dtype)
-            tensor_x = paddle.to_tensor(x)
-            # rank 1
-            y = np.random.random(self.shape).astype(self.dtype)
-            tensor_y = paddle.to_tensor(y)
-
-            max_result = paddle.maximum(tensor_x, tensor_y)
-
-            if rank == 0:
-                task = pg.allreduce(tensor_x, core.ReduceOp.MAX)
-                task.wait()
-                assert np.array_equal(tensor_x, max_result)
-            else:
-                task = pg.allreduce(tensor_y, core.ReduceOp.MAX)
-                task.wait()
-                assert np.array_equal(tensor_y, max_result)
-
-            print("test allreduce max api ok")
-
-            # test broadcast
-            # rank 0
-            x = np.random.random(self.shape).astype(self.dtype)
-            tensor_x = paddle.to_tensor(x)
-            # rank 1
-            y = np.random.random(self.shape).astype(self.dtype)
-            tensor_y = paddle.to_tensor(y)
-
-            broadcast_result = paddle.assign(tensor_x)
-            if rank == 0:
-                task = pg.broadcast(tensor_x, 0)
-                assert np.array_equal(broadcast_result, tensor_x)
-            else:
-                task = pg.broadcast(tensor_y, 0)
-                assert np.array_equal(broadcast_result, tensor_y)
-            print("test broadcast api ok")
-
-            # test barrier
-            # rank 0
-            if pg.rank() == 0:
-                task = pg.barrier()
-                task.wait()
-            # rank 1
-            else:
-                task = pg.barrier()
-                task.wait()
-
-            print("test barrier api ok\n")
-
-            # test allgather
-            # rank 0
-            x = np.random.random(self.shape).astype(self.dtype)
-            y = np.random.random(self.shape).astype(self.dtype)
-            tensor_x = paddle.to_tensor(x)
-            tensor_y = paddle.to_tensor(y)
-            out_shape = list(self.shape)
-            out_shape[0] *= 2
-            out = np.random.random(out_shape).astype(self.dtype)
-            tensor_out = paddle.to_tensor(out)
-            if pg.rank() == 0:
-                task = pg.all_gather(tensor_x, tensor_out)
-                task.wait()
-                paddle.device.cuda.synchronize()
-            # rank 1
-            else:
-                task = pg.all_gather(tensor_y, tensor_out)
-                task.wait()
-            out_1 = paddle.slice(tensor_out, [0], [0], [out_shape[0] // 2])
-            out_2 = paddle.slice(
-                tensor_out, [0], [out_shape[0] // 2], [out_shape[0]]
-            )
-            assert np.array_equal(tensor_x, out_1)
-            assert np.array_equal(tensor_y, out_2)
-            print("test allgather api ok\n")
-
-            # test Reduce
-            # rank 0
-            x = np.random.random(self.shape).astype(self.dtype)
-            y = np.random.random(self.shape).astype(self.dtype)
-            tensor_x = paddle.to_tensor(x)
-            tensor_y = paddle.to_tensor(y)
-            sum_result = tensor_x + tensor_y
-            if pg.rank() == 0:
-                task = pg.reduce(tensor_x, 0)
-                task.wait()
-            # rank 1
-            else:
-                task = pg.reduce(tensor_y, 0)
-                task.wait()
-            if pg.rank() == 0:
-                assert np.array_equal(tensor_x, sum_result)
-            print("test reduce sum api ok\n")
-
-            # test Scatter
-            # rank 0
-            in_shape = list(self.shape)
-            in_shape[0] *= 2
-            x = np.random.random(in_shape).astype(self.dtype)
-            y = np.random.random(self.shape).astype(self.dtype)
-            tensor_x = paddle.to_tensor(x)
-            tensor_y = paddle.to_tensor(y)
-            if pg.rank() == 0:
-                task = pg.scatter(tensor_x, tensor_y, 0)
-                task.wait()
-            # rank 1
-            else:
-                task = pg.scatter(tensor_x, tensor_y, 0)
-                task.wait()
-            out1 = paddle.slice(tensor_x, [0], [0], [self.shape[0]])
-            out2 = paddle.slice(
-                tensor_x, [0], [self.shape[0]], [self.shape[0] * 2]
-            )
-            if pg.rank() == 0:
-                assert np.array_equal(tensor_y, out1)
-            else:
-                assert np.array_equal(tensor_y, out2)
-            print("test scatter api ok\n")
+        nranks = ParallelEnv().nranks
+        rank = ParallelEnv().local_rank
+        is_master = True if rank == 0 else False
+        store = paddle.fluid.core.TCPStore(
+            "127.0.0.1", 6272, is_master, nranks, 30
+        )
+        pg = paddle.fluid.core.ProcessGroupGloo.create(store, rank, nranks)
+
+        # test allreduce sum
+        # rank 0
+        paddle.device.set_device('cpu')
+        x = np.random.random(self.shape).astype(self.dtype)
+        tensor_x = paddle.to_tensor(x)
+        # rank 1
+        y = np.random.random(self.shape).astype(self.dtype)
+        tensor_y = paddle.to_tensor(y)
+
+        sum_result = x + y
+        if rank == 0:
+            task = pg.allreduce(tensor_x)
+            task.wait()
+            np.testing.assert_equal(tensor_x, sum_result)
+        else:
+            task = pg.allreduce(tensor_y)
+            task.wait()
+            np.testing.assert_equal(tensor_y, sum_result)
+
+        print("test allreduce sum api ok")
+
+        # test allreduce max
+        # rank 0
+        x = np.random.random(self.shape).astype(self.dtype)
+        tensor_x = paddle.to_tensor(x)
+        # rank 1
+        y = np.random.random(self.shape).astype(self.dtype)
+        tensor_y = paddle.to_tensor(y)
+
+        max_result = paddle.maximum(tensor_x, tensor_y)
+
+        if rank == 0:
+            task = pg.allreduce(tensor_x, core.ReduceOp.MAX)
+            task.wait()
+            assert np.array_equal(tensor_x, max_result)
+        else:
+            task = pg.allreduce(tensor_y, core.ReduceOp.MAX)
+            task.wait()
+            assert np.array_equal(tensor_y, max_result)
+
+        print("test allreduce max api ok")
+
+        # test broadcast
+        # rank 0
+        x = np.random.random(self.shape).astype(self.dtype)
+        tensor_x = paddle.to_tensor(x)
+        # rank 1
+        y = np.random.random(self.shape).astype(self.dtype)
+        tensor_y = paddle.to_tensor(y)
+
+        broadcast_result = paddle.assign(tensor_x)
+        if rank == 0:
+            task = pg.broadcast(tensor_x, 0)
+            assert np.array_equal(broadcast_result, tensor_x)
+        else:
+            task = pg.broadcast(tensor_y, 0)
+            assert np.array_equal(broadcast_result, tensor_y)
+        print("test broadcast api ok")
+
+        # test barrier
+        # rank 0
+        if pg.rank() == 0:
+            task = pg.barrier()
+            task.wait()
+        # rank 1
+        else:
+            task = pg.barrier()
+            task.wait()
+
+        print("test barrier api ok\n")
+
+        # test allgather
+        # rank 0
+        x = np.random.random(self.shape).astype(self.dtype)
+        y = np.random.random(self.shape).astype(self.dtype)
+        tensor_x = paddle.to_tensor(x)
+        tensor_y = paddle.to_tensor(y)
+        out_shape = list(self.shape)
+        out_shape[0] *= 2
+        out = np.random.random(out_shape).astype(self.dtype)
+        tensor_out = paddle.to_tensor(out)
+        if pg.rank() == 0:
+            task = pg.all_gather(tensor_x, tensor_out)
+            task.wait()
+            paddle.device.cuda.synchronize()
+        # rank 1
+        else:
+            task = pg.all_gather(tensor_y, tensor_out)
+            task.wait()
+        out_1 = paddle.slice(tensor_out, [0], [0], [out_shape[0] // 2])
+        out_2 = paddle.slice(
+            tensor_out, [0], [out_shape[0] // 2], [out_shape[0]]
+        )
+        assert np.array_equal(tensor_x, out_1)
+        assert np.array_equal(tensor_y, out_2)
+        print("test allgather api ok\n")
+
+        # test Reduce
+        # rank 0
+        x = np.random.random(self.shape).astype(self.dtype)
+        y = np.random.random(self.shape).astype(self.dtype)
+        tensor_x = paddle.to_tensor(x)
+        tensor_y = paddle.to_tensor(y)
+        sum_result = tensor_x + tensor_y
+        if pg.rank() == 0:
+            task = pg.reduce(tensor_x, 0)
+            task.wait()
+        # rank 1
+        else:
+            task = pg.reduce(tensor_y, 0)
+            task.wait()
+        if pg.rank() == 0:
+            assert np.array_equal(tensor_x, sum_result)
+        print("test reduce sum api ok\n")
+
+        # test Scatter
+        # rank 0
+        in_shape = list(self.shape)
+        in_shape[0] *= 2
+        x = np.random.random(in_shape).astype(self.dtype)
+        y = np.random.random(self.shape).astype(self.dtype)
+        tensor_x = paddle.to_tensor(x)
+        tensor_y = paddle.to_tensor(y)
+        if pg.rank() == 0:
+            task = pg.scatter(tensor_x, tensor_y, 0)
+            task.wait()
+        # rank 1
+        else:
+            task = pg.scatter(tensor_x, tensor_y, 0)
+            task.wait()
+        out1 = paddle.slice(tensor_x, [0], [0], [self.shape[0]])
+        out2 = paddle.slice(tensor_x, [0], [self.shape[0]], [self.shape[0] * 2])
+        if pg.rank() == 0:
+            assert np.array_equal(tensor_y, out1)
+        else:
+            assert np.array_equal(tensor_y, out2)
+        print("test scatter api ok\n")
 
 
 if __name__ == "__main__":
@@ -28,7 +28,7 @@ from paddle.distributed.collective import (
     _set_group_map_by_name,
 )
 from paddle.fluid import core
-from paddle.fluid.framework import _set_expected_place, _test_eager_guard
+from paddle.fluid.framework import _set_expected_place
 
 
 ctypes.CDLL("libmpi.so", mode=ctypes.RTLD_GLOBAL)
@@ -444,51 +444,49 @@ class TestProcessGroup(unittest.TestCase):
         self.shape = (2, 10, 5)
 
     def test_create_process_group_mpi(self):
-        with _test_eager_guard():
-            group = init_process_group()
-            pg = group.process_group
-
-            # test allreduce sum
-            test_allreduce_sum(pg, self.shape, self.dtype)
-            # test allreduce max
-            test_allreduce_max(pg, self.shape, self.dtype)
-            # test allreduce min
-            test_allreduce_min(pg, self.shape, self.dtype)
-            # test allreduce prod
-            test_allreduce_prod(pg, self.shape, self.dtype)
-            # test broadcast
-            test_broadcast(pg, self.shape, self.dtype)
-            # test barrier
-            test_barrair(pg)
-            # test allgather
-            test_allgather(pg, self.shape, self.dtype)
-            # test alltoall
-            test_all2all(pg, self.shape, self.dtype)
-            # test Reduce
-            test_reduce_sum(pg, self.shape, self.dtype)
-            # test reduce max
-            test_reduce_max(pg, self.shape, self.dtype)
-            # test reduce min
-            test_reduce_min(pg, self.shape, self.dtype)
-            # test reduce product
-            test_reduce_prod(pg, self.shape, self.dtype)
-            # test Scatter
-            test_scatter(pg, self.shape, self.dtype)
-            # test send recv.
-            test_send_recv(pg, group, self.shape, self.dtype)
+        group = init_process_group()
+        pg = group.process_group
+        # test allreduce sum
+        test_allreduce_sum(pg, self.shape, self.dtype)
+        # test allreduce max
+        test_allreduce_max(pg, self.shape, self.dtype)
+        # test allreduce min
+        test_allreduce_min(pg, self.shape, self.dtype)
+        # test allreduce prod
+        test_allreduce_prod(pg, self.shape, self.dtype)
+        # test broadcast
+        test_broadcast(pg, self.shape, self.dtype)
+        # test barrier
+        test_barrair(pg)
+        # test allgather
+        test_allgather(pg, self.shape, self.dtype)
+        # test alltoall
+        test_all2all(pg, self.shape, self.dtype)
+        # test Reduce
+        test_reduce_sum(pg, self.shape, self.dtype)
+        # test reduce max
+        test_reduce_max(pg, self.shape, self.dtype)
+        # test reduce min
+        test_reduce_min(pg, self.shape, self.dtype)
+        # test reduce product
+        test_reduce_prod(pg, self.shape, self.dtype)
+        # test Scatter
+        test_scatter(pg, self.shape, self.dtype)
+        # test send recv.
+        test_send_recv(pg, group, self.shape, self.dtype)
 
 
 if __name__ == "__main__":