Unverified commit 0835de79, authored by Yanxing Shi, committed by GitHub

add ParallelMode docs (#41326)

Parent 606848af
python/paddle/distributed/__init__.py
@@ -25,6 +25,7 @@ from .parallel_with_gloo import gloo_release
from paddle.distributed.fleet.dataset import InMemoryDataset # noqa: F401
from paddle.distributed.fleet.dataset import QueueDataset # noqa: F401
from paddle.distributed.fleet.base.topology import ParallelMode # noqa: F401
from .collective import broadcast # noqa: F401
from .collective import all_reduce # noqa: F401
@@ -86,4 +87,5 @@ __all__ = [ # noqa
"wait",
"get_rank",
"ProbabilityEntry",
"ParallelMode",
]
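
With this change, ParallelMode is re-exported at the top of the paddle.distributed package and listed in its __all__, so users can import it directly instead of reaching into the fleet.base.topology submodule. A minimal sketch of the two equivalent import paths after this commit:

from paddle.distributed import ParallelMode  # new public path added by this commit
from paddle.distributed.fleet.base.topology import ParallelMode as _ParallelMode  # original location

# Both names refer to the same class object.
assert ParallelMode is _ParallelMode
print(ParallelMode.DATA_PARALLEL)  # 0
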
python/paddle/distributed/fleet/base/topology.py
@@ -27,6 +27,22 @@ _HYBRID_PARALLEL_GROUP = None
class ParallelMode(object):
"""
    The parallel modes currently supported are:
    - DATA_PARALLEL: Distributes input data across different devices.
    - TENSOR_PARALLEL: Shards tensors in the network across different devices.
    - PIPELINE_PARALLEL: Places different layers of the network on different devices.
    - SHARDING_PARALLEL: Segments the model parameters, parameter gradients, and
      optimizer states across different devices.
    Examples:
        .. code-block:: python

            import paddle
            parallel_mode = paddle.distributed.ParallelMode
            print(parallel_mode.DATA_PARALLEL)  # 0
"""
DATA_PARALLEL = 0
TENSOR_PARALLEL = 1
PIPELINE_PARALLEL = 2
......
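
Because the mode values are plain integer class attributes rather than enum.Enum members, they can be compared with == or used as dictionary keys. A minimal usage sketch (describe_mode is a hypothetical helper for illustration, not part of Paddle; the diff is folded before SHARDING_PARALLEL, so only the three constants shown above are used):

import paddle

# Hypothetical helper: map each mode constant to a one-line description
# of its strategy, using the wording from the docstring above.
def describe_mode(mode):
    ParallelMode = paddle.distributed.ParallelMode
    descriptions = {
        ParallelMode.DATA_PARALLEL: "replicate the model; distribute input data across devices",
        ParallelMode.TENSOR_PARALLEL: "shard tensors in the network across devices",
        ParallelMode.PIPELINE_PARALLEL: "place different layers of the network on different devices",
    }
    return descriptions.get(mode, "unknown mode")

print(describe_mode(paddle.distributed.ParallelMode.DATA_PARALLEL))
# replicate the model; distribute input data across devices
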