# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import atexit
from . import io
from .spawn import spawn  # noqa: F401
from .launch.main import launch  # noqa: F401
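
# Dynamic-graph parallel environment: initialization plus rank and
# world-size queries for the current distributed job.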
from .parallel import init_parallel_env  # noqa: F401
from .parallel import get_rank  # noqa: F401
from .parallel import get_world_size  # noqa: F401
from .parallel import ParallelEnv  # noqa: F401

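# Gloo-backed (CPU) helpers for initializing, synchronizing, and releasing
# a parallel environment.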
from .parallel_with_gloo import gloo_init_parallel_env
from .parallel_with_gloo import gloo_barrier
from .parallel_with_gloo import gloo_release

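# Dataset types re-exported from paddle.distributed.fleet.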
from paddle.distributed.fleet.dataset import InMemoryDataset  # noqa: F401
from paddle.distributed.fleet.dataset import QueueDataset  # noqa: F401
from paddle.distributed.fleet.base.topology import ParallelMode  # noqa: F401

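# Collective helpers: model-parallel split, process-group creation,
# availability checks, and internal process-group teardown (registered
# with atexit at the bottom of this file).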
from .collective import split  # noqa: F401
from .collective import new_group  # noqa: F401
from .collective import is_available
from .collective import _destroy_process_group_id_map
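# Collective and point-to-point communication primitives.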
from .communication import (
    stream,
    ReduceOp,
    all_gather,
    all_gather_object,
    all_reduce,
    alltoall,
    alltoall_single,
    broadcast,
    broadcast_object_list,
    reduce,
    send,
    scatter,
    scatter_object_list,
    isend,
    recv,
    irecv,
    batch_isend_irecv,
    P2POp,
    reduce_scatter,
    is_initialized,
    destroy_process_group,
    get_group,
    wait,
    barrier,
    get_backend,
)  # noqa: F401

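# Auto-parallel annotations for sharding operators and tensors.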
from .auto_parallel import shard_op  # noqa: F401
from .auto_parallel import shard_tensor  # noqa: F401

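# BoxPS dataset re-exported from the fleet package.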
from .fleet import BoxPSDataset  # noqa: F401

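# Entry policies for sparse embedding tables in parameter-server training.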
from .entry_attr import ProbabilityEntry  # noqa: F401
from .entry_attr import CountFilterEntry  # noqa: F401
from .entry_attr import ShowClickEntry  # noqa: F401

from . import cloud_utils  # noqa: F401

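# Group sharded (ZeRO-style) data parallelism: wrap a model and optimizer,
# then save the resulting sharded model.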
from .sharding import group_sharded_parallel  # noqa: F401
from .sharding import save_group_sharded_model  # noqa: F401

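# RPC-based distributed programming interface.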
from . import rpc

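# Names exported as the public paddle.distributed API.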
__all__ = [  # noqa
    "io",
    "spawn",
    "launch",
    "scatter",
    "scatter_object_list",
    "broadcast",
    "broadcast_object_list",
    "ParallelEnv",
    "new_group",
    "init_parallel_env",
    "gloo_init_parallel_env",
    "gloo_barrier",
    "gloo_release",
    "QueueDataset",
    "split",
    "CountFilterEntry",
    "ShowClickEntry",
    "get_world_size",
    "get_group",
    "all_gather",
    "all_gather_object",
    "InMemoryDataset",
    "barrier",
    "all_reduce",
    "alltoall",
    "alltoall_single",
    "send",
    "reduce",
    "recv",
    "ReduceOp",
    "wait",
    "get_rank",
    "ProbabilityEntry",
    "ParallelMode",
    "is_initialized",
    "destroy_process_group",
    "isend",
    "irecv",
    "reduce_scatter",
    "is_available",
    "get_backend",
]

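# Destroy any remaining process groups when the interpreter exits.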
atexit.register(_destroy_process_group_id_map)