# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from . import io
from .spawn import spawn  # noqa: F401
from .launch.main import launch  # noqa: F401
from .parallel import init_parallel_env  # noqa: F401
from .parallel import get_rank  # noqa: F401
from .parallel import get_world_size  # noqa: F401
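
# Illustrative usage (editor's sketch, not part of the upstream file): a
# minimal multi-process entry point built from the helpers imported above.
#
#     import paddle.distributed as dist
#
#     def train():
#         dist.init_parallel_env()
#         print(f"rank {dist.get_rank()} of {dist.get_world_size()}")
#
#     if __name__ == '__main__':
#         dist.spawn(train, nprocs=2)  # nprocs=2 is an assumption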

from .parallel_with_gloo import gloo_init_parallel_env
from .parallel_with_gloo import gloo_barrier
from .parallel_with_gloo import gloo_release
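
# Illustrative usage (sketch, reusing the ``dist`` alias from the sketch
# above): CPU-only initialization over gloo; the endpoint is a placeholder,
# not a documented default.
#
#     dist.gloo_init_parallel_env(rank_id=0, rank_num=1,
#                                 server_endpoint="127.0.0.1:6170")
#     dist.gloo_barrier()
#     dist.gloo_release()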

from paddle.distributed.fleet.dataset import InMemoryDataset  # noqa: F401
from paddle.distributed.fleet.dataset import QueueDataset  # noqa: F401
from paddle.distributed.fleet.base.topology import ParallelMode  # noqa: F401

from .collective import split  # noqa: F401
from .collective import new_group  # noqa: F401
from .collective import is_available  # noqa: F401
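
# Illustrative usage (sketch, assumes an initialized parallel env with at
# least two ranks):
#
#     if dist.is_available():
#         group = dist.new_group(ranks=[0, 1])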

from .communication import (
    stream,
    ReduceOp,
    all_gather,
    all_gather_object,
    all_reduce,
    alltoall,
    alltoall_single,
    broadcast,
    broadcast_object_list,
    reduce,
    send,
    scatter,
    scatter_object_list,
    isend,
    recv,
    irecv,
    batch_isend_irecv,
    P2POp,
    reduce_scatter,
    is_initialized,
    destroy_process_group,
    get_group,
    wait,
    barrier,
    get_backend,
)  # noqa: F401
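
# Illustrative usage (sketch): summing a tensor across the default group;
# ReduceOp.SUM is also the default op for all_reduce.
#
#     import paddle
#     import paddle.distributed as dist
#
#     dist.init_parallel_env()
#     t = paddle.to_tensor([float(dist.get_rank())])
#     dist.all_reduce(t, op=dist.ReduceOp.SUM)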

from .auto_parallel import shard_op  # noqa: F401
from .auto_parallel import shard_tensor  # noqa: F401

from .fleet import BoxPSDataset  # noqa: F401

from .entry_attr import ProbabilityEntry  # noqa: F401
from .entry_attr import CountFilterEntry  # noqa: F401
from .entry_attr import ShowClickEntry  # noqa: F401

# (TODO: GhostScreaming) ParallelEnv still needs to be migrated, but the
# APIs in paddle.fluid.dygraph.parallel are hard to migrate completely,
# so this import will be replaced later.
from paddle.fluid.dygraph.parallel import ParallelEnv  # noqa: F401

from . import cloud_utils  # noqa: F401

from .sharding import group_sharded_parallel  # noqa: F401
from .sharding import save_group_sharded_model  # noqa: F401
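
# Illustrative usage (sketch): wrapping a model and optimizer for group
# sharding; "os_g" (optimizer state + gradient sharding) is one documented
# level, and ``model``/``optimizer`` are placeholders.
#
#     model, optimizer, scaler = group_sharded_parallel(
#         model, optimizer, level="os_g")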

from . import rpc

__all__ = [  # noqa
    "io",
    "spawn",
    "launch",
    "scatter",
    "scatter_object_list",
    "broadcast",
    "broadcast_object_list",
    "ParallelEnv",
    "new_group",
    "init_parallel_env",
    "gloo_init_parallel_env",
    "gloo_barrier",
    "gloo_release",
    "QueueDataset",
    "split",
    "CountFilterEntry",
    "ShowClickEntry",
    "get_world_size",
    "get_group",
    "all_gather",
    "all_gather_object",
    "InMemoryDataset",
    "barrier",
    "all_reduce",
    "alltoall",
    "alltoall_single",
    "send",
    "reduce",
    "recv",
    "ReduceOp",
    "wait",
    "get_rank",
    "ProbabilityEntry",
    "ParallelMode",
    "is_initialized",
    "destroy_process_group",
    "isend",
    "irecv",
    "reduce_scatter",
    "is_available",
    "get_backend",
]