# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from .spawn import spawn  # noqa: F401

from .parallel import init_parallel_env  # noqa: F401
from .parallel import get_rank  # noqa: F401
from .parallel import get_world_size  # noqa: F401

from .parallel_with_gloo import gloo_init_parallel_env
from .parallel_with_gloo import gloo_barrier
from .parallel_with_gloo import gloo_release

from paddle.distributed.fleet.dataset import InMemoryDataset  # noqa: F401
from paddle.distributed.fleet.dataset import QueueDataset  # noqa: F401

from .collective import broadcast  # noqa: F401
from .collective import all_reduce  # noqa: F401
from .collective import reduce  # noqa: F401
from .collective import all_gather  # noqa: F401
from .collective import scatter  # noqa: F401
from .collective import barrier  # noqa: F401
from .collective import ReduceOp  # noqa: F401
from .collective import split  # noqa: F401
from .collective import new_group  # noqa: F401
from .collective import alltoall  # noqa: F401
from .collective import recv  # noqa: F401
from .collective import get_group  # noqa: F401
from .collective import send  # noqa: F401
from .collective import wait  # noqa: F401

from .auto_parallel import shard_tensor  # noqa: F401
from .auto_parallel import shard_op  # noqa: F401
from .auto_parallel import set_shard_mask  # noqa: F401
from .auto_parallel import set_offload_device  # noqa: F401
from .auto_parallel import set_pipeline_stage  # noqa: F401
from .auto_parallel import ProcessMesh  # noqa: F401

from .fleet import BoxPSDataset  # noqa: F401

from .entry_attr import ProbabilityEntry  # noqa: F401
from .entry_attr import CountFilterEntry  # noqa: F401

from paddle.fluid.dygraph.parallel import ParallelEnv  # noqa: F401

from . import cloud_utils  # noqa: F401
from . import utils  # noqa: F401


# Public API of the ``paddle.distributed`` package.  Every name listed here is
# imported above purely for re-export (hence the ``# noqa: F401`` markers at
# the import sites); ``from paddle.distributed import *`` exposes exactly this
# set.  The trailing ``# noqa`` suppresses the "undefined in __all__" style
# warning, since the names are bound by the relative imports above rather
# than defined in this module.
__all__ = [  # noqa
    "spawn",
    "scatter",
    "broadcast",
    "ParallelEnv",
    "new_group",
    "init_parallel_env",
    "gloo_init_parallel_env",
    "gloo_barrier",
    "gloo_release",
    "QueueDataset",
    "split",
    "CountFilterEntry",
    "get_world_size",
    "get_group",
    "all_gather",
    "InMemoryDataset",
    "barrier",
    "all_reduce",
    "alltoall",
    "send",
    "reduce",
    "recv",
    "ReduceOp",
    "wait",
    "get_rank",
    "ProbabilityEntry",
    "shard_tensor",
    "shard_op",
    "set_shard_mask",
    "set_offload_device",
    "set_pipeline_stage",
    "ProcessMesh",
]