diff --git a/python/paddle/distributed/collective.py b/python/paddle/distributed/collective.py
index 47db4d2e7a35d548e08cc2003682d365c1cc9c69..b631f7bbe9d110e43fd00d3b669b573fedd083fc 100644
--- a/python/paddle/distributed/collective.py
+++ b/python/paddle/distributed/collective.py
@@ -36,7 +36,37 @@ __all__ = [
 
 
 class ReduceOp:
-    """Reduce Operation"""
+    """
+    Specify the type of operation used for element-wise reductions.
+    It should be one of the following values:
+
+        ReduceOp.SUM
+
+        ReduceOp.MAX
+
+        ReduceOp.MIN
+
+        ReduceOp.PROD
+
+    Examples:
+        .. code-block:: python
+
+            import numpy as np
+            import paddle
+            from paddle.distributed import ReduceOp
+            from paddle.distributed import init_parallel_env
+
+            paddle.set_device('gpu:%d'%paddle.distributed.ParallelEnv().dev_id)
+            init_parallel_env()
+            if paddle.distributed.ParallelEnv().local_rank == 0:
+                np_data = np.array([[4, 5, 6], [4, 5, 6]])
+            else:
+                np_data = np.array([[1, 2, 3], [1, 2, 3]])
+            data = paddle.to_tensor(np_data)
+            paddle.distributed.all_reduce(data, op=ReduceOp.SUM)
+            out = data.numpy()
+            # [[5, 7, 9], [5, 7, 9]]
+    """
     SUM = 0
     MAX = 1
     MIN = 2
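
For comparison, a minimal sketch (not part of the patch above) of the same all_reduce call using ReduceOp.MAX instead of ReduceOp.SUM; it assumes the same two-process GPU setup as the new docstring example and uses only APIs already shown in the patch:

    import numpy as np
    import paddle
    from paddle.distributed import ReduceOp
    from paddle.distributed import init_parallel_env

    # Same two-process setup as the docstring example, but reducing with MAX.
    paddle.set_device('gpu:%d' % paddle.distributed.ParallelEnv().dev_id)
    init_parallel_env()
    if paddle.distributed.ParallelEnv().local_rank == 0:
        np_data = np.array([[4, 5, 6], [4, 5, 6]])
    else:
        np_data = np.array([[1, 2, 3], [1, 2, 3]])
    data = paddle.to_tensor(np_data)
    # Element-wise maximum across ranks; every rank receives the same tensor.
    paddle.distributed.all_reduce(data, op=ReduceOp.MAX)
    out = data.numpy()
    # [[4, 5, 6], [4, 5, 6]]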