# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Public entry point of the ``paddle.distributed`` package.

This module only re-exports names from the package's submodules; it contains
no logic of its own.  Names carrying ``# noqa: F401`` are imported solely for
re-export.  ``__all__`` lists the subset that ``from paddle.distributed
import *`` exposes.
"""

# Process launching / multi-process entry points.
from .spawn import spawn  # noqa: F401
from .launch.main import launch  # noqa: F401

# Core parallel-environment helpers.
from .parallel import init_parallel_env  # noqa: F401
from .parallel import get_rank  # noqa: F401
from .parallel import get_world_size  # noqa: F401

# Gloo-backend (CPU) parallel-environment helpers.
from .parallel_with_gloo import gloo_init_parallel_env
from .parallel_with_gloo import gloo_barrier
from .parallel_with_gloo import gloo_release

# Dataset and topology types re-exported from the fleet subpackage.
from paddle.distributed.fleet.dataset import InMemoryDataset  # noqa: F401
from paddle.distributed.fleet.dataset import QueueDataset  # noqa: F401
from paddle.distributed.fleet.base.topology import ParallelMode  # noqa: F401

# Collective communication primitives.
from .collective import broadcast  # noqa: F401
from .collective import all_reduce  # noqa: F401
from .collective import reduce  # noqa: F401
from .collective import all_gather  # noqa: F401
from .collective import scatter  # noqa: F401
from .collective import barrier  # noqa: F401
from .collective import ReduceOp  # noqa: F401
from .collective import split  # noqa: F401
from .collective import new_group  # noqa: F401
from .collective import alltoall  # noqa: F401
from .collective import recv  # noqa: F401
from .collective import get_group  # noqa: F401
from .collective import send  # noqa: F401
from .collective import wait  # noqa: F401
from .collective import is_initialized  # noqa: F401
from .collective import destroy_process_group  # noqa: F401
from .collective import alltoall_single  # noqa: F401
from .collective import isend  # noqa: F401
from .collective import irecv  # noqa: F401
from .collective import batch_isend_irecv  # noqa: F401
from .collective import P2POp  # noqa: F401
from .collective import reduce_scatter  # noqa: F401

# Auto-parallel sharding annotations.
from .auto_parallel import shard_op  # noqa: F401
from .auto_parallel import shard_tensor  # noqa: F401

from .fleet import BoxPSDataset  # noqa: F401

# Sparse-parameter entry-selection strategies.
from .entry_attr import ProbabilityEntry  # noqa: F401
from .entry_attr import CountFilterEntry  # noqa: F401
from .entry_attr import ShowClickEntry  # noqa: F401

from paddle.fluid.dygraph.parallel import ParallelEnv  # noqa: F401

from . import cloud_utils  # noqa: F401
from . import utils  # noqa: F401

from .sharding import *  # noqa: F401

# NOTE(review): several re-exported names (e.g. destroy_process_group,
# alltoall_single, batch_isend_irecv, P2POp) are intentionally absent from
# __all__; they are importable directly but hidden from ``import *``.
__all__ = [  # noqa
    "spawn", "launch", "scatter", "broadcast", "ParallelEnv", "new_group",
    "init_parallel_env", "gloo_init_parallel_env", "gloo_barrier",
    "gloo_release", "QueueDataset", "split", "CountFilterEntry",
    "ShowClickEntry", "get_world_size", "get_group", "all_gather",
    "InMemoryDataset", "barrier", "all_reduce", "alltoall", "send", "reduce",
    "recv", "ReduceOp", "wait", "get_rank", "ProbabilityEntry", "ParallelMode",
    "is_initialized", "isend", "irecv", "reduce_scatter"
]