Unverified commit f53e5a04 authored by 姜永久, committed by GitHub

rm multinode eager guard tests (#48766)

* rm multinode eager guard tests

* remove unwanted tests

* reset process_mpi test
Parent 7216d9bf
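Every file in this diff changes the same way: the `_test_eager_guard` import is deleted and test code is unwrapped from the guard's context manager, presumably because eager mode no longer needs to be opted into in these tests. A minimal before/after sketch of the pattern, using the `test_stage2_stage3` entry point from the first hunks below:

# Before: the test only ran inside the eager-mode guard.
from paddle.fluid.framework import _test_eager_guard

if __name__ == '__main__':
    with _test_eager_guard():
        test_stage2_stage3()

# After: the import and the context manager are removed,
# and the test body runs directly.
if __name__ == '__main__':
    test_stage2_stage3()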
@@ -34,7 +34,6 @@ from paddle.distributed.fleet.meta_parallel.sharding.group_sharded_stage3 import
 from paddle.distributed.fleet.meta_parallel.sharding.group_sharded_utils import (
     GroupShardedScaler,
 )
-from paddle.fluid.framework import _test_eager_guard
 from paddle.nn import Linear
 
 epoch = 10
@@ -331,5 +330,4 @@ def test_stage2_stage3():
 
 
 if __name__ == '__main__':
-    with _test_eager_guard():
     test_stage2_stage3()
@@ -29,7 +29,6 @@ from paddle.distributed.fleet.meta_optimizers.dygraph_optimizer.sharding_optimiz
 from paddle.distributed.fleet.meta_parallel.sharding.sharding_stage2 import (
     ShardingStage2,
 )
-from paddle.fluid.framework import _test_eager_guard
 from paddle.nn import Linear
 
 seed = 2022
@@ -248,7 +247,5 @@ def test_dp_stage2():
 
 
 if __name__ == '__main__':
-    with _test_eager_guard():
-        pass
     fleet.init(is_collective=True, strategy=strategy)
     test_dp_stage2()
@@ -20,7 +20,6 @@ import numpy as np
 import paddle
 from paddle.fluid import core
 from paddle.fluid.dygraph.parallel import ParallelEnv
-from paddle.fluid.framework import _test_eager_guard
 
 
 class TestProcessGroupFp32(unittest.TestCase):
@@ -35,7 +34,6 @@ class TestProcessGroupFp32(unittest.TestCase):
         self.shape = (2, 10, 5)
 
     def test_create_process_group_gloo(self):
-        with _test_eager_guard():
         nranks = ParallelEnv().nranks
         rank = ParallelEnv().local_rank
         is_master = True if rank == 0 else False
@@ -175,9 +173,7 @@ class TestProcessGroupFp32(unittest.TestCase):
         task = pg.scatter(tensor_x, tensor_y, 0)
         task.wait()
         out1 = paddle.slice(tensor_x, [0], [0], [self.shape[0]])
-        out2 = paddle.slice(
-            tensor_x, [0], [self.shape[0]], [self.shape[0] * 2]
-        )
+        out2 = paddle.slice(tensor_x, [0], [self.shape[0]], [self.shape[0] * 2])
         if pg.rank() == 0:
             assert np.array_equal(tensor_y, out1)
         else:
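The scatter check in the hunk above splits rank 0's buffer evenly across two ranks and compares each rank's result against a slice of the source tensor. A standalone sketch of that slice-and-compare logic (illustrative only; shapes follow the test's `self.shape = (2, 10, 5)`, and no process group is needed for the slicing itself):

import numpy as np
import paddle

shape = (2, 10, 5)
# tensor_x stacks both ranks' shards along axis 0, as in the test.
x = np.random.random((shape[0] * 2,) + shape[1:]).astype('float32')
tensor_x = paddle.to_tensor(x)

# paddle.slice(input, axes, starts, ends): the two halves a scatter
# from rank 0 would deliver to rank 0 and rank 1 respectively.
out1 = paddle.slice(tensor_x, [0], [0], [shape[0]])
out2 = paddle.slice(tensor_x, [0], [shape[0]], [shape[0] * 2])

assert np.array_equal(out1.numpy(), x[:shape[0]])
assert np.array_equal(out2.numpy(), x[shape[0]:])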
@@ -28,7 +28,7 @@ from paddle.distributed.collective import (
     _set_group_map_by_name,
 )
 from paddle.fluid import core
-from paddle.fluid.framework import _set_expected_place, _test_eager_guard
+from paddle.fluid.framework import _set_expected_place
 
 ctypes.CDLL("libmpi.so", mode=ctypes.RTLD_GLOBAL)
@@ -444,10 +444,8 @@ class TestProcessGroup(unittest.TestCase):
         self.shape = (2, 10, 5)
 
     def test_create_process_group_mpi(self):
-        with _test_eager_guard():
         group = init_process_group()
         pg = group.process_group
 
         # test allreduce sum
         test_allreduce_sum(pg, self.shape, self.dtype)
@@ -20,7 +20,6 @@ import numpy as np
 import paddle
 import paddle.distributed as dist
 from paddle.fluid.dygraph.parallel import ParallelEnv
-from paddle.fluid.framework import _test_eager_guard
 
 
 def init_process_group(strategy=None):
@@ -44,7 +43,6 @@ class TestProcessGroupFp32(unittest.TestCase):
         self.shape = (2, 10, 5)
 
     def test_create_process_group_nccl(self):
-        with _test_eager_guard():
         device_id = paddle.distributed.ParallelEnv().dev_id
         paddle.set_device('gpu:%d' % device_id)
@@ -81,15 +79,11 @@ class TestProcessGroupFp32(unittest.TestCase):
         max_result = paddle.maximum(tensor_x, tensor_y)
 
         if pg.rank() == 0:
-            task = dist.all_reduce(
-                tensor_x, dist.ReduceOp.MAX, sync_op=False
-            )
+            task = dist.all_reduce(tensor_x, dist.ReduceOp.MAX, sync_op=False)
             task.wait()
             assert np.array_equal(tensor_x, max_result)
         else:
-            task = dist.all_reduce(
-                tensor_y, dist.ReduceOp.MAX, sync_op=False
-            )
+            task = dist.all_reduce(tensor_y, dist.ReduceOp.MAX, sync_op=False)
             task.wait()
             assert np.array_equal(tensor_y, max_result)
@@ -106,15 +100,11 @@ class TestProcessGroupFp32(unittest.TestCase):
         min_result = paddle.minimum(tensor_x, tensor_y)
 
         if pg.rank() == 0:
-            task = dist.all_reduce(
-                tensor_x, dist.ReduceOp.MIN, sync_op=False
-            )
+            task = dist.all_reduce(tensor_x, dist.ReduceOp.MIN, sync_op=False)
             task.wait()
             assert np.array_equal(tensor_x, min_result)
         else:
-            task = dist.all_reduce(
-                tensor_y, dist.ReduceOp.MIN, sync_op=False
-            )
+            task = dist.all_reduce(tensor_y, dist.ReduceOp.MIN, sync_op=False)
             task.wait()
             assert np.array_equal(tensor_y, min_result)
@@ -131,15 +121,11 @@ class TestProcessGroupFp32(unittest.TestCase):
         prod_result = np.multiply(x, y)
 
         if pg.rank() == 0:
-            task = dist.all_reduce(
-                tensor_x, dist.ReduceOp.PROD, sync_op=False
-            )
+            task = dist.all_reduce(tensor_x, dist.ReduceOp.PROD, sync_op=False)
             task.wait()
             assert np.array_equal(tensor_x, prod_result)
         else:
-            task = dist.all_reduce(
-                tensor_y, dist.ReduceOp.PROD, sync_op=False
-            )
+            task = dist.all_reduce(tensor_y, dist.ReduceOp.PROD, sync_op=False)
             task.wait()
             assert np.array_equal(tensor_y, prod_result)
@@ -263,9 +249,7 @@ class TestProcessGroupFp32(unittest.TestCase):
         raw_tensor_x_2 = paddle.slice(
             tensor_x, [0], [self.shape[0] // 2], [self.shape[0]]
         )
-        raw_tensor_y_1 = paddle.slice(
-            tensor_y, [0], [0], [self.shape[0] // 2]
-        )
+        raw_tensor_y_1 = paddle.slice(tensor_y, [0], [0], [self.shape[0] // 2])
         if pg.rank() == 0:
             task = pg.alltoall(tensor_x, tensor_out1)
             task.wait()
@@ -298,9 +282,7 @@ class TestProcessGroupFp32(unittest.TestCase):
         raw_tensor_x_2 = paddle.slice(
             tensor_x, [0], [self.shape[0] // 2], [self.shape[0]]
         )
-        raw_tensor_y_1 = paddle.slice(
-            tensor_y, [0], [0], [self.shape[0] // 2]
-        )
+        raw_tensor_y_1 = paddle.slice(tensor_y, [0], [0], [self.shape[0] // 2])
         if pg.rank() == 0:
             task = pg.alltoall(tensor_x, tensor_out1)
             task.wait()
@@ -352,15 +334,11 @@ class TestProcessGroupFp32(unittest.TestCase):
         max_result = paddle.maximum(tensor_x, tensor_y)
 
         if pg.rank() == 0:
-            task = dist.reduce(
-                tensor_x, 0, dist.ReduceOp.MAX, sync_op=False
-            )
+            task = dist.reduce(tensor_x, 0, dist.ReduceOp.MAX, sync_op=False)
             task.wait()
             assert np.array_equal(tensor_x, max_result)
         else:
-            task = dist.reduce(
-                tensor_y, 0, dist.ReduceOp.MAX, sync_op=False
-            )
+            task = dist.reduce(tensor_y, 0, dist.ReduceOp.MAX, sync_op=False)
             task.wait()
 
         print("test reduce max api ok")
@@ -376,15 +354,11 @@ class TestProcessGroupFp32(unittest.TestCase):
         min_result = paddle.minimum(tensor_x, tensor_y)
 
         if pg.rank() == 0:
-            task = dist.reduce(
-                tensor_x, 0, dist.ReduceOp.MIN, sync_op=False
-            )
+            task = dist.reduce(tensor_x, 0, dist.ReduceOp.MIN, sync_op=False)
             task.wait()
             assert np.array_equal(tensor_x, min_result)
         else:
-            task = dist.reduce(
-                tensor_y, 0, dist.ReduceOp.MIN, sync_op=False
-            )
+            task = dist.reduce(tensor_y, 0, dist.ReduceOp.MIN, sync_op=False)
             task.wait()
 
         print("test reduce min api ok")
@@ -400,15 +374,11 @@ class TestProcessGroupFp32(unittest.TestCase):
         prod_result = np.multiply(x, y)
 
         if pg.rank() == 0:
-            task = dist.reduce(
-                tensor_x, 0, dist.ReduceOp.PROD, sync_op=False
-            )
+            task = dist.reduce(tensor_x, 0, dist.ReduceOp.PROD, sync_op=False)
             task.wait()
             assert np.array_equal(tensor_x, prod_result)
         else:
-            task = dist.reduce(
-                tensor_y, 0, dist.ReduceOp.PROD, sync_op=False
-            )
+            task = dist.reduce(tensor_y, 0, dist.ReduceOp.PROD, sync_op=False)
             task.wait()
 
         print("test reduce prod api ok")
@@ -431,9 +401,7 @@ class TestProcessGroupFp32(unittest.TestCase):
         task.wait()
         paddle.device.cuda.synchronize()
         out1 = paddle.slice(tensor_x, [0], [0], [self.shape[0]])
-        out2 = paddle.slice(
-            tensor_x, [0], [self.shape[0]], [self.shape[0] * 2]
-        )
+        out2 = paddle.slice(tensor_x, [0], [self.shape[0]], [self.shape[0] * 2])
         if pg.rank() == 0:
             assert np.array_equal(tensor_y, out1)
         else:
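Most of the reflowed lines above exercise one idiom: with `sync_op=False`, `dist.all_reduce` and `dist.reduce` return immediately with a task handle, and `task.wait()` must complete before the result is read. A minimal sketch of that idiom, assuming a parallel environment launched externally (e.g. via `paddle.distributed.launch`) with at least two ranks:

import numpy as np
import paddle
import paddle.distributed as dist

dist.init_parallel_env()  # assumes ranks were started by a launcher

x = np.random.random((2, 10, 5)).astype('float32')
tensor_x = paddle.to_tensor(x)

# Asynchronous collective: the call returns a task; wait() blocks
# until the element-wise MAX across all ranks has been written back.
task = dist.all_reduce(tensor_x, dist.ReduceOp.MAX, sync_op=False)
task.wait()
print("all_reduce max done on rank", dist.get_rank())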