From b2f41825f3ee90480b946ac22a3093c9c6a466f5 Mon Sep 17 00:00:00 2001
From: Roc <30228238+sljlp@users.noreply.github.com>
Date: Fri, 30 Dec 2022 14:47:12 +0800
Subject: [PATCH] unit test of reduce with zero dim (#49436)

---
 .../collective/process_group_nccl.py | 86 +++++++++++++++++++
 1 file changed, 86 insertions(+)

diff --git a/python/paddle/fluid/tests/unittests/collective/process_group_nccl.py b/python/paddle/fluid/tests/unittests/collective/process_group_nccl.py
index 130510d90c..3be3cfecf1 100644
--- a/python/paddle/fluid/tests/unittests/collective/process_group_nccl.py
+++ b/python/paddle/fluid/tests/unittests/collective/process_group_nccl.py
@@ -488,6 +488,9 @@ class TestProcessGroupFp32(unittest.TestCase):
             task.wait()
 
         print("test reduce prod api ok")
+
+        test_reduce_with_zero_dim([], self.dtype, pg)
+
         # test Scatter
         # rank 0
         in_shape = list(self.shape)
@@ -601,5 +604,88 @@ class TestProcessGroupFp16(TestProcessGroupFp32):
         self.shape = (4, 20, 20)
 
 
+def test_reduce_with_zero_dim(shape, dtype, pg):
+    # test Reduce With Zero Dim
+    # rank 0
+    x = np.random.random(shape).astype(dtype)
+    y = np.random.random(shape).astype(dtype)
+    tensor_x = paddle.to_tensor(x)
+    tensor_y = paddle.to_tensor(y)
+    sum_result = tensor_x + tensor_y
+    if pg.rank() == 0:
+        task = dist.reduce(tensor_x, 0, sync_op=True)
+        paddle.device.cuda.synchronize()
+    # rank 1
+    else:
+        task = dist.reduce(tensor_y, 0, sync_op=False)
+        task.wait()
+        paddle.device.cuda.synchronize()
+    if pg.rank() == 0:
+        assert np.array_equal(tensor_x, sum_result) and len(tensor_x.shape) == 0
+    print("test reduce with zero dim sum api ok\n")
+
+    # test reduce with zero dim max
+    # rank 0
+    x = np.random.random(shape).astype(dtype)
+    tensor_x = paddle.to_tensor(x)
+    # rank 1
+    y = np.random.random(shape).astype(dtype)
+    tensor_y = paddle.to_tensor(y)
+
+    max_result = paddle.maximum(tensor_x, tensor_y)
+
+    if pg.rank() == 0:
+        task = dist.reduce(tensor_x, 0, dist.ReduceOp.MAX, sync_op=False)
+        task.wait()
+        assert np.array_equal(tensor_x, max_result) and len(tensor_x.shape) == 0
+    else:
+        task = dist.reduce(tensor_y, 0, dist.ReduceOp.MAX, sync_op=False)
+        task.wait()
+
+    print("test reduce with zero dim max api ok")
+
+    # test reduce with zero dim min
+    # rank 0
+    x = np.random.random(shape).astype(dtype)
+    tensor_x = paddle.to_tensor(x)
+    # rank 1
+    y = np.random.random(shape).astype(dtype)
+    tensor_y = paddle.to_tensor(y)
+
+    min_result = paddle.minimum(tensor_x, tensor_y)
+
+    if pg.rank() == 0:
+        task = dist.reduce(tensor_x, 0, dist.ReduceOp.MIN, sync_op=False)
+        task.wait()
+        assert np.array_equal(tensor_x, min_result) and len(tensor_x.shape) == 0
+    else:
+        task = dist.reduce(tensor_y, 0, dist.ReduceOp.MIN, sync_op=False)
+        task.wait()
+
+    print("test reduce with zero dim min api ok")
+
+    # test reduce with zero dim product
+    # rank 0
+    x = np.random.random(shape).astype(dtype)
+    tensor_x = paddle.to_tensor(x)
+    # rank 1
+    y = np.random.random(shape).astype(dtype)
+    tensor_y = paddle.to_tensor(y)
+
+    prod_result = np.multiply(x, y)
+
+    if pg.rank() == 0:
+        task = dist.reduce(tensor_x, 0, dist.ReduceOp.PROD, sync_op=False)
+        task.wait()
+        assert (
+            np.array_equal(tensor_x, prod_result) and len(tensor_x.shape) == 0
+        )
+    else:
+        task = dist.reduce(tensor_y, 0, dist.ReduceOp.PROD, sync_op=False)
+        task.wait()
+
+    print("test reduce with zero dim prod api ok")
+
+
 if __name__ == "__main__":
     unittest.main()
-- 
GitLab
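
For reference, below is a minimal standalone sketch of the zero-dim reduce
pattern this patch exercises. It is illustrative only and not part of the
patch; it assumes a CUDA build of Paddle with at least two GPUs, launched as
"python -m paddle.distributed.launch --gpus 0,1" followed by the script path
(the file name is hypothetical):

    # zero_dim_reduce_sketch.py (hypothetical file name, not in the patch)
    import numpy as np
    import paddle
    import paddle.distributed as dist

    dist.init_parallel_env()
    # np.random.random([]) returns a 0-d array, so the tensor has shape []
    x = paddle.to_tensor(np.random.random([]).astype("float32"))
    # reduce every rank's scalar onto rank 0 with a SUM
    dist.reduce(x, dst=0, op=dist.ReduceOp.SUM, sync_op=True)
    if dist.get_rank() == 0:
        # the reduce must preserve the zero-dim shape of the tensor,
        # which is the property the new unit test asserts
        assert len(x.shape) == 0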