# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import atexit
from . import io
from .spawn import spawn  # noqa: F401
from .launch.main import launch  # noqa: F401
from .parallel import init_parallel_env  # noqa: F401
from .parallel import get_rank  # noqa: F401
from .parallel import get_world_size  # noqa: F401

from .parallel_with_gloo import gloo_init_parallel_env
from .parallel_with_gloo import gloo_barrier
from .parallel_with_gloo import gloo_release

from paddle.distributed.fleet.dataset import InMemoryDataset  # noqa: F401
from paddle.distributed.fleet.dataset import QueueDataset  # noqa: F401
from paddle.distributed.fleet.base.topology import ParallelMode  # noqa: F401

from .collective import split  # noqa: F401
from .collective import new_group  # noqa: F401
from .collective import is_available
from .collective import _destroy_process_group_id_map
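# Collective and point-to-point communication APIs re-exported from
# ``paddle.distributed.communication``.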
from .communication import (
    stream,
    ReduceOp,
    all_gather,
    all_gather_object,
    all_reduce,
    alltoall,
    alltoall_single,
    broadcast,
    broadcast_object_list,
    reduce,
    send,
    scatter,
    scatter_object_list,
    isend,
    recv,
    irecv,
    batch_isend_irecv,
    P2POp,
    reduce_scatter,
    is_initialized,
    destroy_process_group,
    get_group,
    wait,
    barrier,
    get_backend,
)  # noqa: F401

from .auto_parallel import shard_op  # noqa: F401
from .auto_parallel import shard_tensor  # noqa: F401

from .fleet import BoxPSDataset  # noqa: F401

from .entry_attr import ProbabilityEntry  # noqa: F401
from .entry_attr import CountFilterEntry  # noqa: F401
from .entry_attr import ShowClickEntry  # noqa: F401

# TODO(GhostScreaming): ParallelEnv still needs to be migrated. However, it is
# hard to migrate the APIs in paddle.fluid.dygraph.parallel completely, so this
# import will be replaced later.
from paddle.fluid.dygraph.parallel import ParallelEnv  # noqa: F401

from . import cloud_utils  # noqa: F401

from .sharding import group_sharded_parallel  # noqa: F401
from .sharding import save_group_sharded_model  # noqa: F401

from . import rpc

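# Public API of ``paddle.distributed``; the ``noqa: F401`` comments above
# silence unused-import warnings for symbols that are only re-exported here.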
__all__ = [  # noqa
    "io",
    "spawn",
    "launch",
    "scatter",
    "scatter_object_list",
    "broadcast",
    "broadcast_object_list",
    "ParallelEnv",
    "new_group",
    "init_parallel_env",
    "gloo_init_parallel_env",
    "gloo_barrier",
    "gloo_release",
    "QueueDataset",
    "split",
    "CountFilterEntry",
    "ShowClickEntry",
    "get_world_size",
    "get_group",
    "all_gather",
    "all_gather_object",
    "InMemoryDataset",
    "barrier",
    "all_reduce",
    "alltoall",
    "alltoall_single",
    "send",
    "reduce",
    "recv",
    "ReduceOp",
    "wait",
    "get_rank",
    "ProbabilityEntry",
    "ParallelMode",
    "is_initialized",
    "destroy_process_group",
    "isend",
    "irecv",
    "reduce_scatter",
    "is_available",
    "get_backend",
]

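# Clean up the cached process-group id map when the Python interpreter exits.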
atexit.register(_destroy_process_group_id_map)