Unverified Commit e83f5f33 authored by 姜永久, committed by GitHub

remove xpu eager guard tests (#48786)
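For context: `_test_eager_guard` was a transitional context manager in `paddle.fluid.framework` that opted a test body in to the eager dygraph mode while Paddle was migrating to it. With eager execution now the default, the guard is redundant, so this commit deletes the import and the `with` block and dedents the test bodies (which also lets several `sys.stdout.write` calls collapse back onto one line). A condensed, illustrative sketch of the before/after pattern — the class names below are hypothetical, not lines from the diff:

```python
import paddle.distributed as dist
from paddle.fluid.framework import _test_eager_guard  # import removed by this commit


class TestDistTraningBefore:
    # Old pattern: the guard forced the test body to run under eager
    # dygraph mode, which was opt-in when these tests were written.
    def test_multiple_xpus(self):
        with _test_eager_guard():
            self.pg = dist.init_parallel_env()


class TestDistTraningAfter:
    # New pattern: eager mode is the default, so the guard is dropped
    # and the body is simply dedented one level.
    def test_multiple_xpus(self):
        self.pg = dist.init_parallel_env()
```

The hunks below apply this same mechanical change to the XPU data-parallel test and the BKCL process-group test.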

Parent 25dafc58
@@ -19,7 +19,6 @@ import numpy as np
 import paddle
 import paddle.distributed as dist
 import paddle.fluid as fluid
-from paddle.fluid.framework import _test_eager_guard
 from paddle.nn import Linear

 paddle.seed(1024)
@@ -69,7 +68,6 @@ class SimpleNet(fluid.Layer):

 class TestDistTraning(unittest.TestCase):
     def test_multiple_xpus(self):
         self.trainer_id = dist.get_rank()
-        with _test_eager_guard():
         self.pg = dist.init_parallel_env()
         model_a = SimpleNet(self.trainer_id)
......
@@ -21,7 +21,6 @@ import numpy as np
 import paddle
 import paddle.distributed as dist
 from paddle.fluid.dygraph.parallel import ParallelEnv
-from paddle.fluid.framework import _test_eager_guard


 def init_process_group(strategy=None):
@@ -45,19 +44,14 @@ class TestProcessGroupFp32(unittest.TestCase):
         self.shape = (2, 10, 5)

     def test_create_process_group_bkcl(self):
-        with _test_eager_guard():
         device_id = paddle.distributed.ParallelEnv().dev_id
         paddle.set_device('xpu:%d' % device_id)

         pg = init_process_group()
-            sys.stdout.write(
-                "rank {}: size {} name {}\n".format(
-                    pg.rank(), pg.size(), pg.name()
-                )
-            )
-            sys.stdout.write(
-                "rank {}: test new group api ok\n".format(pg.rank())
-            )
+        sys.stdout.write(
+            "rank {}: size {} name {}\n".format(pg.rank(), pg.size(), pg.name())
+        )
+        sys.stdout.write("rank {}: test new group api ok\n".format(pg.rank()))

         # test allreduce sum
         # rank 0
@@ -101,9 +95,7 @@ class TestProcessGroupFp32(unittest.TestCase):
         paddle.device.xpu.synchronize()
         assert np.array_equal(broadcast_result, tensor_y)

-            sys.stdout.write(
-                "rank {}: test broadcast api ok\n".format(pg.rank())
-            )
+        sys.stdout.write("rank {}: test broadcast api ok\n".format(pg.rank()))

         # test barrier
         # rank 0
@@ -145,9 +137,7 @@ class TestProcessGroupFp32(unittest.TestCase):
         )
         assert np.array_equal(tensor_x, out_1)
         assert np.array_equal(tensor_y, out_2)
-            sys.stdout.write(
-                "rank {}: test allgather api ok\n".format(pg.rank())
-            )
+        sys.stdout.write("rank {}: test allgather api ok\n".format(pg.rank()))

         if pg.rank() == 0:
             task = pg.all_gather(tensor_x, tensor_out)
@@ -165,9 +155,7 @@ class TestProcessGroupFp32(unittest.TestCase):
         )
         assert np.array_equal(tensor_x, out_1)
         assert np.array_equal(tensor_y, out_2)
-            sys.stdout.write(
-                "rank {}: test allgather api2 ok\n".format(pg.rank())
-            )
+        sys.stdout.write("rank {}: test allgather api2 ok\n".format(pg.rank()))

         # test Reduce
         # rank 0
@@ -186,9 +174,7 @@ class TestProcessGroupFp32(unittest.TestCase):
         paddle.device.xpu.synchronize()
         if pg.rank() == 0:
             assert np.array_equal(tensor_x, sum_result)
-            sys.stdout.write(
-                "rank {}: test reduce sum api ok\n".format(pg.rank())
-            )
+        sys.stdout.write("rank {}: test reduce sum api ok\n".format(pg.rank()))


 class TestProcessGroupFp16(TestProcessGroupFp32):
......