From 50a5bcfc9d8aa79b9f196e573c4cf05f574c04c3 Mon Sep 17 00:00:00 2001
From: Dong Daxiang <35550832+guru4elephant@users.noreply.github.com>
Date: Thu, 13 Aug 2020 11:08:43 +0800
Subject: [PATCH] =?UTF-8?q?=E3=80=90paddle.fleet=E3=80=91paddle.fleet=20->?=
 =?UTF-8?q?=20paddle.distributed.fleet.=20=20(#26186)?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

* move paddle.fleet to paddle.distributed.fleet
---
 paddle/fluid/framework/CMakeLists.txt         | 14 ++---
 python/paddle/__init__.py                     |  2 +-
 .../{ => distributed}/fleet/__init__.py       |  0
 .../{ => distributed}/fleet/base/__init__.py  |  0
 .../fleet/base/distributed_strategy.py        | 28 ++-----
 .../fleet/base/fleet_base.py                  | 15 ++---
 .../fleet/base/meta_optimizer_factory.py      |  0
 .../fleet/base/private_helper_function.py     |  0
 .../fleet/base/role_maker.py                  |  2 +-
 .../fleet/base/runtime_factory.py             |  0
 .../fleet/base/strategy_compiler.py           |  0
 .../fleet/base/util_factory.py                |  4 +-
 .../{ => distributed}/fleet/cloud_utils.py    |  2 +-
 .../fleet/dataset/__init__.py                 |  0
 .../fleet/dataset/dataset.py                  |  0
 .../paddle/{ => distributed}/fleet/launch.py  |  4 +-
 .../{ => distributed}/fleet/launch_utils.py   |  0
 .../fleet/meta_optimizers/__init__.py         |  0
 .../fleet/meta_optimizers/amp_optimizer.py    |  0
 .../async_graph_execution_optimizer.py        |  0
 .../fleet/meta_optimizers/async_optimizer.py  |  0
 .../fleet/meta_optimizers/common.py           |  0
 .../fleet/meta_optimizers/dgc_optimizer.py    |  0
 .../gradient_merge_optimizer.py               |  0
 .../graph_execution_optimizer.py              |  0
 .../fleet/meta_optimizers/lamb_optimizer.py   |  0
 .../fleet/meta_optimizers/lars_optimizer.py   |  0
 .../meta_optimizers/localsgd_optimizer.py     |  0
 .../meta_optimizers/meta_optimizer_base.py    |  0
 .../meta_optimizers/pipeline_optimizer.py     |  0
 .../meta_optimizers/recompute_optimizer.py    |  0
 .../fleet/metrics/__init__.py                 |  0
 .../{ => distributed}/fleet/metrics/metric.py | 16 ++---
 .../fleet/runtime/__init__.py                 |  0
 .../fleet/runtime/collective_runtime.py       |  0
 .../fleet/runtime/parameter_server_runtime.py |  0
 .../fleet/runtime/runtime_base.py             |  0
 .../{ => distributed}/fleet/utils/__init__.py |  0
 .../{ => distributed}/fleet/utils/fs.py       |  0
 .../fleet/utils/http_server.py                |  0
 .../incubate/checkpoint/auto_checkpoint.py    |  2 +-
 .../incubate/checkpoint/checkpoint_saver.py   |  4 +-
 .../fluid/incubate/fleet/base/fleet_base.py   |  2 +-
 python/paddle/fluid/reader.py                 |  4 +-
 .../fluid/tests/unittests/CMakeLists.txt      |  0
 .../fluid/tests/unittests/c_comm_init_op.py   |  2 +-
 .../fluid/tests/unittests/dist_fleet_ctr.py   |  4 +-
 .../tests/unittests/test_auto_checkpoint.py   |  2 +-
 .../tests/unittests/test_auto_checkpoint2.py  |  2 +-
 .../tests/unittests/test_communicator_geo.py  |  6 +-
 .../tests/unittests/test_communicator_sync.py |  4 +-
 .../fluid/tests/unittests/test_dataset.py     | 46 ++++++++-------
 .../unittests/test_dataset_dataloader.py      |  2 +-
 .../test_dist_fleet_a_sync_optimizer_async.py |  8 +--
 .../test_dist_fleet_a_sync_optimizer_geo.py   |  8 +--
 .../test_dist_fleet_a_sync_optimizer_sync.py  |  4 +-
 .../tests/unittests/test_dist_fleet_base.py   |  4 +-
 .../tests/unittests/test_dist_fleet_ps2.py    |  4 +-
 .../test_fleet_amp_meta_optimizer.py          |  4 +-
 .../fluid/tests/unittests/test_fleet_base.py  | 34 +++++------
 .../test_fleet_dgc_meta_optimizer.py          |  4 +-
 .../test_fleet_distributed_strategy.py        | 58 +++++++++----------
 ...est_fleet_gradient_merge_meta_optimizer.py |  4 +-
 ...st_fleet_graph_execution_meta_optimizer.py |  8 +--
 .../unittests/test_fleet_graph_executor.py    |  4 +-
 .../test_fleet_lamb_meta_optimizer.py         |  4 +-
.../test_fleet_lars_meta_optimizer.py | 4 +- .../test_fleet_localsgd_meta_optimizer.py | 4 +- .../tests/unittests/test_fleet_metric.py | 2 +- .../test_fleet_pipeline_meta_optimizer.py | 4 +- .../unittests/test_fleet_private_function.py | 2 +- .../test_fleet_recompute_meta_optimizer.py | 4 +- .../tests/unittests/test_fleet_rolemaker_2.py | 2 +- .../tests/unittests/test_fleet_rolemaker_4.py | 6 +- .../unittests/test_fleet_rolemaker_new.py | 2 +- .../tests/unittests/test_fleet_runtime.py | 9 +-- .../fluid/tests/unittests/test_fleet_util.py | 14 ++--- .../tests/unittests/test_fs_interface.py | 2 +- .../paddle/fluid/tests/unittests/test_hdfs.py | 2 +- .../fluid/tests/unittests/test_monitor.py | 2 +- python/setup.py.in | 18 +++--- 81 files changed, 197 insertions(+), 195 deletions(-) rename python/paddle/{ => distributed}/fleet/__init__.py (100%) rename python/paddle/{ => distributed}/fleet/base/__init__.py (100%) rename python/paddle/{ => distributed}/fleet/base/distributed_strategy.py (96%) rename python/paddle/{ => distributed}/fleet/base/fleet_base.py (95%) rename python/paddle/{ => distributed}/fleet/base/meta_optimizer_factory.py (100%) rename python/paddle/{ => distributed}/fleet/base/private_helper_function.py (100%) rename python/paddle/{ => distributed}/fleet/base/role_maker.py (99%) rename python/paddle/{ => distributed}/fleet/base/runtime_factory.py (100%) rename python/paddle/{ => distributed}/fleet/base/strategy_compiler.py (100%) rename python/paddle/{ => distributed}/fleet/base/util_factory.py (99%) rename python/paddle/{ => distributed}/fleet/cloud_utils.py (97%) rename python/paddle/{ => distributed}/fleet/dataset/__init__.py (100%) rename python/paddle/{ => distributed}/fleet/dataset/dataset.py (100%) rename python/paddle/{ => distributed}/fleet/launch.py (99%) rename python/paddle/{ => distributed}/fleet/launch_utils.py (100%) rename python/paddle/{ => distributed}/fleet/meta_optimizers/__init__.py (100%) rename python/paddle/{ => distributed}/fleet/meta_optimizers/amp_optimizer.py (100%) rename python/paddle/{ => distributed}/fleet/meta_optimizers/async_graph_execution_optimizer.py (100%) rename python/paddle/{ => distributed}/fleet/meta_optimizers/async_optimizer.py (100%) rename python/paddle/{ => distributed}/fleet/meta_optimizers/common.py (100%) rename python/paddle/{ => distributed}/fleet/meta_optimizers/dgc_optimizer.py (100%) rename python/paddle/{ => distributed}/fleet/meta_optimizers/gradient_merge_optimizer.py (100%) rename python/paddle/{ => distributed}/fleet/meta_optimizers/graph_execution_optimizer.py (100%) rename python/paddle/{ => distributed}/fleet/meta_optimizers/lamb_optimizer.py (100%) rename python/paddle/{ => distributed}/fleet/meta_optimizers/lars_optimizer.py (100%) rename python/paddle/{ => distributed}/fleet/meta_optimizers/localsgd_optimizer.py (100%) rename python/paddle/{ => distributed}/fleet/meta_optimizers/meta_optimizer_base.py (100%) rename python/paddle/{ => distributed}/fleet/meta_optimizers/pipeline_optimizer.py (100%) rename python/paddle/{ => distributed}/fleet/meta_optimizers/recompute_optimizer.py (100%) rename python/paddle/{ => distributed}/fleet/metrics/__init__.py (100%) rename python/paddle/{ => distributed}/fleet/metrics/metric.py (95%) rename python/paddle/{ => distributed}/fleet/runtime/__init__.py (100%) rename python/paddle/{ => distributed}/fleet/runtime/collective_runtime.py (100%) rename python/paddle/{ => distributed}/fleet/runtime/parameter_server_runtime.py (100%) rename python/paddle/{ => 
distributed}/fleet/runtime/runtime_base.py (100%) rename python/paddle/{ => distributed}/fleet/utils/__init__.py (100%) rename python/paddle/{ => distributed}/fleet/utils/fs.py (100%) rename python/paddle/{ => distributed}/fleet/utils/http_server.py (100%) mode change 100755 => 100644 python/paddle/fluid/tests/unittests/CMakeLists.txt diff --git a/paddle/fluid/framework/CMakeLists.txt b/paddle/fluid/framework/CMakeLists.txt index ff2d08bb772..d725bdffa01 100644 --- a/paddle/fluid/framework/CMakeLists.txt +++ b/paddle/fluid/framework/CMakeLists.txt @@ -164,23 +164,23 @@ if(WITH_PYTHON) if (NOT WIN32) add_custom_command(TARGET framework_py_proto POST_BUILD COMMAND ${CMAKE_COMMAND} -E make_directory ${PADDLE_BINARY_DIR}/python/paddle/fluid/proto - COMMAND ${CMAKE_COMMAND} -E make_directory ${PADDLE_BINARY_DIR}/python/paddle/fleet/proto - COMMAND ${CMAKE_COMMAND} -E touch ${PADDLE_BINARY_DIR}/python/paddle/fleet/proto/__init__.py + COMMAND ${CMAKE_COMMAND} -E make_directory ${PADDLE_BINARY_DIR}/python/paddle/distributed/fleet/proto + COMMAND ${CMAKE_COMMAND} -E touch ${PADDLE_BINARY_DIR}/python/paddle/distributed/fleet/proto/__init__.py COMMAND cp *.py ${PADDLE_BINARY_DIR}/python/paddle/fluid/proto/ - COMMAND cp distributed_strategy_*.py ${PADDLE_BINARY_DIR}/python/paddle/fleet/proto + COMMAND cp distributed_strategy_*.py ${PADDLE_BINARY_DIR}/python/paddle/distributed/fleet/proto COMMENT "Copy generated python proto into directory paddle/fluid/proto." WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}) else(NOT WIN32) string(REPLACE "/" "\\" proto_dstpath "${PADDLE_BINARY_DIR}/python/paddle/fluid/proto/") - string(REPLACE "/" "\\" fleet_proto_dstpath "${PADDLE_BINARY_DIR}/python/paddle/fleet/proto/") + string(REPLACE "/" "\\" fleet_proto_dstpath "${PADDLE_BINARY_DIR}/python/paddle/distributed/fleet/proto/") add_custom_command(TARGET framework_py_proto POST_BUILD COMMAND ${CMAKE_COMMAND} -E make_directory ${PADDLE_BINARY_DIR}/python/paddle/fluid/proto - COMMAND ${CMAKE_COMMAND} -E make_directory ${PADDLE_BINARY_DIR}/python/paddle/fleet/proto - COMMAND ${CMAKE_COMMAND} -E touch ${PADDLE_BINARY_DIR}/python/paddle/fleet/proto/__init__.py + COMMAND ${CMAKE_COMMAND} -E make_directory ${PADDLE_BINARY_DIR}/python/paddle/distributed/fleet/proto + COMMAND ${CMAKE_COMMAND} -E touch ${PADDLE_BINARY_DIR}/python/paddle/distributed/fleet/proto/__init__.py COMMAND copy /Y *.py ${proto_dstpath} COMMAND copy /Y distributed_strategy_*.py ${fleet_proto_dstpath} COMMENT "Copy generated python proto into directory paddle/fluid/proto." - COMMENT "Copy generated python proto into directory paddle/fleet/proto." + COMMENT "Copy generated python proto into directory paddle/distributed/fleet/proto." 
WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}) endif(NOT WIN32) endif() diff --git a/python/paddle/__init__.py b/python/paddle/__init__.py index 63aba8ec71c..b125b36c6b6 100644 --- a/python/paddle/__init__.py +++ b/python/paddle/__init__.py @@ -36,7 +36,7 @@ import paddle.distributed import paddle.sysconfig import paddle.tensor import paddle.nn -import paddle.fleet +import paddle.distributed.fleet import paddle.framework import paddle.optimizer import paddle.metric diff --git a/python/paddle/fleet/__init__.py b/python/paddle/distributed/fleet/__init__.py similarity index 100% rename from python/paddle/fleet/__init__.py rename to python/paddle/distributed/fleet/__init__.py diff --git a/python/paddle/fleet/base/__init__.py b/python/paddle/distributed/fleet/base/__init__.py similarity index 100% rename from python/paddle/fleet/base/__init__.py rename to python/paddle/distributed/fleet/base/__init__.py diff --git a/python/paddle/fleet/base/distributed_strategy.py b/python/paddle/distributed/fleet/base/distributed_strategy.py similarity index 96% rename from python/paddle/fleet/base/distributed_strategy.py rename to python/paddle/distributed/fleet/base/distributed_strategy.py index 26b4c8f572a..31bfd482766 100755 --- a/python/paddle/fleet/base/distributed_strategy.py +++ b/python/paddle/distributed/fleet/base/distributed_strategy.py @@ -13,7 +13,7 @@ # limitations under the License. import paddle -from paddle.fleet.proto import distributed_strategy_pb2 +from paddle.distributed.fleet.proto import distributed_strategy_pb2 from paddle.fluid.framework import Variable import google.protobuf.text_format @@ -103,7 +103,7 @@ class DistributedStrategy(object): Examples: .. code-block:: python - import paddle.fleet as fleet + import paddle.distributed.fleet as fleet strategy = fleet.DistributedStrategy() strategy.dgc = True strategy.recompute = True @@ -120,7 +120,7 @@ class DistributedStrategy(object): Examples: .. code-block:: python - import paddle.fleet as fleet + import paddle.distributed.fleet as fleet strategy = fleet.DistributedStrategy() strategy.load_from_prototxt("dist_strategy.protoxt") """ @@ -141,7 +141,7 @@ class DistributedStrategy(object): exe_strategy.num_iteration_per_drop_scope = 10 exe_strategy.num_iteration_per_run = 10 - strategy = paddle.fleet.DistributedStrategy() + strategy = paddle.distributed.fleet.DistributedStrategy() strategy.execution_strategy = exe_strategy """ execution_strategy = paddle.fluid.ExecutionStrategy() @@ -178,7 +178,7 @@ class DistributedStrategy(object): build_strategy.fuse_all_optimizer_ops = True build_strategy.enable_inplace = True - strategy = paddle.fleet.DistributedStrategy() + strategy = paddle.distributed.fleet.DistributedStrategy() strategy.build_strategy = build_strategy """ @@ -211,7 +211,7 @@ class DistributedStrategy(object): Examples: .. code-block:: python - import paddle.fleet as fleet + import paddle.distributed.fleet as fleet role_maker = fleet.PaddleCloudRoleMaker() fleet.init(role_maker) @@ -253,7 +253,7 @@ class DistributedStrategy(object): Examples: .. code-block:: python - import paddle.fleet as fleet + import paddle.distributed.fleet as fleet role_maker = fleet.PaddleCloudRoleMaker() fleet.init(role_maker) @@ -282,7 +282,7 @@ class DistributedStrategy(object): Examples: .. code-block:: python - import paddle.fleet as fleet + import paddle.distributed.fleet as fleet strategy = fleet.DistributedStrategy() strategy.amp = True # by default this is false @@ -314,7 +314,7 @@ class DistributedStrategy(object): Examples: .. 
code-block:: python - import paddle.fleet as fleet + import paddle.distributed.fleet as fleet strategy = fleet.DistributedStrategy() strategy.recompute = True # suppose x and y are names of checkpoint tensors for recomputation @@ -432,7 +432,7 @@ class DistributedStrategy(object): Examples: .. code-block:: python - import paddle.fleet as fleet + import paddle.distributed.fleet as fleet strategy = fleet.DistributedStrategy() strategy.recompute = True strategy.recompute_configs = {"checkpionts": ["x", "y"]} @@ -457,7 +457,7 @@ class DistributedStrategy(object): Examples: .. code-block:: python - import paddle.fleet as fleet + import paddle.distributed.fleet as fleet strategy = fleet.DistributedStrategy() strategy.pipeline = True @@ -490,7 +490,7 @@ class DistributedStrategy(object): Examples: .. code-block:: python - import paddle.fleet as fleet + import paddle.distributed.fleet as fleet strategy = fleet.DistributedStrategy() strategy.pipeline = True strategy.pipeline_configs = {"micro_batch": 12} @@ -560,7 +560,7 @@ class DistributedStrategy(object): Examples: .. code-block:: python - import paddle.fleet as fleet + import paddle.distributed.fleet as fleet strategy = fleet.DistributedStrategy() strategy.gradient_merge = True strategy.gradient_merge_configs = {"k_steps": 4, "avg": True} @@ -583,7 +583,7 @@ class DistributedStrategy(object): avg (bool): whether to average the gradients of each mini-batch, the default value is `True` Example: - import paddle.fleet as fleet + import paddle.distributed.fleet as fleet strategy = fleet.DistributedStrategy() strategy.gradient_merge = True strategy.gradient_merge_configs = {"k_steps": 4, "avg": True} diff --git a/python/paddle/fleet/base/fleet_base.py b/python/paddle/distributed/fleet/base/fleet_base.py similarity index 95% rename from python/paddle/fleet/base/fleet_base.py rename to python/paddle/distributed/fleet/base/fleet_base.py index 6f4bdc166d6..845e331d557 100644 --- a/python/paddle/fleet/base/fleet_base.py +++ b/python/paddle/distributed/fleet/base/fleet_base.py @@ -34,9 +34,8 @@ class Fleet(object): Examples: .. code-block:: python - import paddle.fleet as fleet - import paddle.fluid.incubate.fleet.base.role_maker as role_maker - role = role_maker.PaddleCloudRoleMaker(is_collective=True) + import paddle.distributed.fleet as fleet + role = fleet.role_maker.PaddleCloudRoleMaker(is_collective=True) fleet.init(role) strategy = fleet.DistributedStrategy() optimizer = paddle.optimizer.SGD(learning_rate=0.001) @@ -218,9 +217,8 @@ class Fleet(object): Examples: .. 
code-block:: python - import paddle.fleet as fleet - import paddle.fluid.incubate.fleet.base.role_maker as role_maker - role = role_maker.PaddleCloudRoleMaker(is_collective=True) + import paddle.distributed.fleet as fleet + role = fleet.role_maker.PaddleCloudRoleMaker(is_collective=True) fleet.init(role) strategy = fleet.DistributedStrategy() optimizer = paddle.optimizer.SGD(learning_rate=0.001) @@ -260,8 +258,7 @@ class Fleet(object): Examples: import paddle - import paddle.fleet as fleet - import paddle.fluid.incubate.fleet.base.role_maker as role_maker + import paddle.distributed.fleet as fleet fc_1 = paddle.layers.fc(input=input_x, size=hid_dim, act='tanh') fc_2 = paddlen.layers.fc(input=fc_1, size=hid_dim, act='tanh') @@ -269,7 +266,7 @@ class Fleet(object): cost = paddle.layers.cross_entropy(input=prediction, label=input_y) avg_cost = paddle.layers.mean(x=cost) - role = role_maker.PaddleCloudRoleMaker(is_collective=True) + role = fleet.role_maker.PaddleCloudRoleMaker(is_collective=True) fleet.init(role) strategy = fleet.DistributedStrategy() optimizer = paddle.optimizer.SGD(learning_rate=0.001) diff --git a/python/paddle/fleet/base/meta_optimizer_factory.py b/python/paddle/distributed/fleet/base/meta_optimizer_factory.py similarity index 100% rename from python/paddle/fleet/base/meta_optimizer_factory.py rename to python/paddle/distributed/fleet/base/meta_optimizer_factory.py diff --git a/python/paddle/fleet/base/private_helper_function.py b/python/paddle/distributed/fleet/base/private_helper_function.py similarity index 100% rename from python/paddle/fleet/base/private_helper_function.py rename to python/paddle/distributed/fleet/base/private_helper_function.py diff --git a/python/paddle/fleet/base/role_maker.py b/python/paddle/distributed/fleet/base/role_maker.py similarity index 99% rename from python/paddle/fleet/base/role_maker.py rename to python/paddle/distributed/fleet/base/role_maker.py index b3e8120af6f..0cf909c98c0 100644 --- a/python/paddle/fleet/base/role_maker.py +++ b/python/paddle/distributed/fleet/base/role_maker.py @@ -481,7 +481,7 @@ class PaddleCloudRoleMaker(RoleMakerBase): return "lo" def __start_kv_server(self, http_server_d, size_d): - from paddle.fleet.utils import KVServer + from paddle.distributed.fleet.utils import KVServer http_server = KVServer(int(self._http_ip_port[1]), size_d) http_server.start() wait_seconds = 5 diff --git a/python/paddle/fleet/base/runtime_factory.py b/python/paddle/distributed/fleet/base/runtime_factory.py similarity index 100% rename from python/paddle/fleet/base/runtime_factory.py rename to python/paddle/distributed/fleet/base/runtime_factory.py diff --git a/python/paddle/fleet/base/strategy_compiler.py b/python/paddle/distributed/fleet/base/strategy_compiler.py similarity index 100% rename from python/paddle/fleet/base/strategy_compiler.py rename to python/paddle/distributed/fleet/base/strategy_compiler.py diff --git a/python/paddle/fleet/base/util_factory.py b/python/paddle/distributed/fleet/base/util_factory.py similarity index 99% rename from python/paddle/fleet/base/util_factory.py rename to python/paddle/distributed/fleet/base/util_factory.py index ed2a8db586a..f5a6c417c0c 100644 --- a/python/paddle/fleet/base/util_factory.py +++ b/python/paddle/distributed/fleet/base/util_factory.py @@ -55,8 +55,8 @@ class UtilBase(object): def set_file_system(self, fs_client): assert isinstance( - fs_client, - FS), "fs_client must be the instance of paddle.fleet.utils.FS" + fs_client, FS + ), "fs_client must be the instance of 
paddle.distributed.fleet.utils.FS" self.fs_client = fs_client def __check_comm_world(self, comm_world="worker"): diff --git a/python/paddle/fleet/cloud_utils.py b/python/paddle/distributed/fleet/cloud_utils.py similarity index 97% rename from python/paddle/fleet/cloud_utils.py rename to python/paddle/distributed/fleet/cloud_utils.py index 72c306fe3b9..49d66118d90 100644 --- a/python/paddle/fleet/cloud_utils.py +++ b/python/paddle/distributed/fleet/cloud_utils.py @@ -14,7 +14,7 @@ import os import paddle -from paddle.fleet.launch_utils import get_cluster, logger +from paddle.distributed.fleet.launch_utils import get_cluster, logger def get_cloud_cluster(args_node_ips, selected_gpus, args_port=6170): diff --git a/python/paddle/fleet/dataset/__init__.py b/python/paddle/distributed/fleet/dataset/__init__.py similarity index 100% rename from python/paddle/fleet/dataset/__init__.py rename to python/paddle/distributed/fleet/dataset/__init__.py diff --git a/python/paddle/fleet/dataset/dataset.py b/python/paddle/distributed/fleet/dataset/dataset.py similarity index 100% rename from python/paddle/fleet/dataset/dataset.py rename to python/paddle/distributed/fleet/dataset/dataset.py diff --git a/python/paddle/fleet/launch.py b/python/paddle/distributed/fleet/launch.py similarity index 99% rename from python/paddle/fleet/launch.py rename to python/paddle/distributed/fleet/launch.py index a6f71f2ae94..29a1bda92f1 100644 --- a/python/paddle/fleet/launch.py +++ b/python/paddle/distributed/fleet/launch.py @@ -66,8 +66,8 @@ from argparse import ArgumentParser, REMAINDER import paddle import paddle.fluid as fluid -from paddle.fleet.launch_utils import * -import paddle.fleet.cloud_utils as cloud_utils +from paddle.distributed.fleet.launch_utils import * +import paddle.distributed.fleet.cloud_utils as cloud_utils def _print_arguments(args): diff --git a/python/paddle/fleet/launch_utils.py b/python/paddle/distributed/fleet/launch_utils.py similarity index 100% rename from python/paddle/fleet/launch_utils.py rename to python/paddle/distributed/fleet/launch_utils.py diff --git a/python/paddle/fleet/meta_optimizers/__init__.py b/python/paddle/distributed/fleet/meta_optimizers/__init__.py similarity index 100% rename from python/paddle/fleet/meta_optimizers/__init__.py rename to python/paddle/distributed/fleet/meta_optimizers/__init__.py diff --git a/python/paddle/fleet/meta_optimizers/amp_optimizer.py b/python/paddle/distributed/fleet/meta_optimizers/amp_optimizer.py similarity index 100% rename from python/paddle/fleet/meta_optimizers/amp_optimizer.py rename to python/paddle/distributed/fleet/meta_optimizers/amp_optimizer.py diff --git a/python/paddle/fleet/meta_optimizers/async_graph_execution_optimizer.py b/python/paddle/distributed/fleet/meta_optimizers/async_graph_execution_optimizer.py similarity index 100% rename from python/paddle/fleet/meta_optimizers/async_graph_execution_optimizer.py rename to python/paddle/distributed/fleet/meta_optimizers/async_graph_execution_optimizer.py diff --git a/python/paddle/fleet/meta_optimizers/async_optimizer.py b/python/paddle/distributed/fleet/meta_optimizers/async_optimizer.py similarity index 100% rename from python/paddle/fleet/meta_optimizers/async_optimizer.py rename to python/paddle/distributed/fleet/meta_optimizers/async_optimizer.py diff --git a/python/paddle/fleet/meta_optimizers/common.py b/python/paddle/distributed/fleet/meta_optimizers/common.py similarity index 100% rename from python/paddle/fleet/meta_optimizers/common.py rename to 
python/paddle/distributed/fleet/meta_optimizers/common.py diff --git a/python/paddle/fleet/meta_optimizers/dgc_optimizer.py b/python/paddle/distributed/fleet/meta_optimizers/dgc_optimizer.py similarity index 100% rename from python/paddle/fleet/meta_optimizers/dgc_optimizer.py rename to python/paddle/distributed/fleet/meta_optimizers/dgc_optimizer.py diff --git a/python/paddle/fleet/meta_optimizers/gradient_merge_optimizer.py b/python/paddle/distributed/fleet/meta_optimizers/gradient_merge_optimizer.py similarity index 100% rename from python/paddle/fleet/meta_optimizers/gradient_merge_optimizer.py rename to python/paddle/distributed/fleet/meta_optimizers/gradient_merge_optimizer.py diff --git a/python/paddle/fleet/meta_optimizers/graph_execution_optimizer.py b/python/paddle/distributed/fleet/meta_optimizers/graph_execution_optimizer.py similarity index 100% rename from python/paddle/fleet/meta_optimizers/graph_execution_optimizer.py rename to python/paddle/distributed/fleet/meta_optimizers/graph_execution_optimizer.py diff --git a/python/paddle/fleet/meta_optimizers/lamb_optimizer.py b/python/paddle/distributed/fleet/meta_optimizers/lamb_optimizer.py similarity index 100% rename from python/paddle/fleet/meta_optimizers/lamb_optimizer.py rename to python/paddle/distributed/fleet/meta_optimizers/lamb_optimizer.py diff --git a/python/paddle/fleet/meta_optimizers/lars_optimizer.py b/python/paddle/distributed/fleet/meta_optimizers/lars_optimizer.py similarity index 100% rename from python/paddle/fleet/meta_optimizers/lars_optimizer.py rename to python/paddle/distributed/fleet/meta_optimizers/lars_optimizer.py diff --git a/python/paddle/fleet/meta_optimizers/localsgd_optimizer.py b/python/paddle/distributed/fleet/meta_optimizers/localsgd_optimizer.py similarity index 100% rename from python/paddle/fleet/meta_optimizers/localsgd_optimizer.py rename to python/paddle/distributed/fleet/meta_optimizers/localsgd_optimizer.py diff --git a/python/paddle/fleet/meta_optimizers/meta_optimizer_base.py b/python/paddle/distributed/fleet/meta_optimizers/meta_optimizer_base.py similarity index 100% rename from python/paddle/fleet/meta_optimizers/meta_optimizer_base.py rename to python/paddle/distributed/fleet/meta_optimizers/meta_optimizer_base.py diff --git a/python/paddle/fleet/meta_optimizers/pipeline_optimizer.py b/python/paddle/distributed/fleet/meta_optimizers/pipeline_optimizer.py similarity index 100% rename from python/paddle/fleet/meta_optimizers/pipeline_optimizer.py rename to python/paddle/distributed/fleet/meta_optimizers/pipeline_optimizer.py diff --git a/python/paddle/fleet/meta_optimizers/recompute_optimizer.py b/python/paddle/distributed/fleet/meta_optimizers/recompute_optimizer.py similarity index 100% rename from python/paddle/fleet/meta_optimizers/recompute_optimizer.py rename to python/paddle/distributed/fleet/meta_optimizers/recompute_optimizer.py diff --git a/python/paddle/fleet/metrics/__init__.py b/python/paddle/distributed/fleet/metrics/__init__.py similarity index 100% rename from python/paddle/fleet/metrics/__init__.py rename to python/paddle/distributed/fleet/metrics/__init__.py diff --git a/python/paddle/fleet/metrics/metric.py b/python/paddle/distributed/fleet/metrics/metric.py similarity index 95% rename from python/paddle/fleet/metrics/metric.py rename to python/paddle/distributed/fleet/metrics/metric.py index 152ee21c147..12a24292e5a 100644 --- a/python/paddle/fleet/metrics/metric.py +++ b/python/paddle/distributed/fleet/metrics/metric.py @@ -43,7 +43,7 @@ def sum(input, 
scope=None): # in train.py, after train or infer res = np.array(scope.find_var(global_cnt.name).get_tensor()) - print("sum array: ", paddle.fleet.sum(res)) + print("sum array: ", paddle.distributed.fleet.sum(res)) """ fleet._role_maker._barrier_worker() if scope is None: @@ -82,7 +82,7 @@ def max(input, scope=None): # in train.py, after train or infer res = np.array(scope.find_var(global_cnt.name).get_tensor()) - print("max array: ", paddle.fleet.max(res)) + print("max array: ", paddle.distributed.fleet.max(res)) """ fleet._role_maker._barrier_worker() if scope is None: @@ -121,7 +121,7 @@ def min(input, scope=None): # in train.py, after train or infer res = np.array(scope.find_var(global_cnt.name).get_tensor()) - print("min array: ", paddle.fleet.min(res)) + print("min array: ", paddle.distributed.fleet.min(res)) """ fleet._role_maker._barrier_worker() if scope is None: @@ -162,7 +162,7 @@ def auc(stat_pos, stat_neg, scope=None): # in train.py, after train or infer pos = np.array(scope.find_var(stat_pos.name).get_tensor()) neg = np.array(scope.find_var(stat_neg.name).get_tensor()) - print("auc: ", paddle.fleet.auc(pos, neg)) + print("auc: ", paddle.distributed.fleet.auc(pos, neg)) """ fleet._role_maker._barrier_worker() if scope is None: @@ -240,7 +240,7 @@ def mae(abserr, total_ins_num, scope=None): # in train.py, after train or infer res = np.array(scope.find_var(abserr.name).get_tensor()) - print("mae: ", paddle.fleet.mae(res, total_ins_num)) + print("mae: ", paddle.distributed.fleet.mae(res, total_ins_num)) """ fleet._role_maker._barrier_worker() if scope is None: @@ -278,7 +278,7 @@ def rmse(sqrerr, total_ins_num, scope=None): # in train.py, after train or infer res = np.array(scope.find_var(sqrerr.name).get_tensor()) - print("rmse: ", paddle.fleet.rmse(res, total_ins_num)) + print("rmse: ", paddle.distributed.fleet.rmse(res, total_ins_num)) """ fleet._role_maker._barrier_worker() if scope is None: @@ -316,7 +316,7 @@ def mse(sqrerr, total_ins_num, scope=None): # in train.py, after train or infer metric = np.array(scope.find_var(sqrerr.name).get_tensor()) - print("mse: ", paddle.fleet.mse(metric, total_ins_num)) + print("mse: ", paddle.distributed.fleet.mse(metric, total_ins_num)) """ fleet._role_maker._barrier_worker() if scope is None: @@ -365,7 +365,7 @@ def acc(correct, total, scope=None): # in train.py, after train or infer correct_num = np.array(scope.find_var(correct.name).get_tensor()) total_num = np.array(scope.find_var(total.name).get_tensor()) - print("accuracy: ", paddle.fleet.acc(correct_num, total_num)) + print("accuracy: ", paddle.distributed.fleet.acc(correct_num, total_num)) """ fleet._role_maker._barrier_worker() if scope is None: diff --git a/python/paddle/fleet/runtime/__init__.py b/python/paddle/distributed/fleet/runtime/__init__.py similarity index 100% rename from python/paddle/fleet/runtime/__init__.py rename to python/paddle/distributed/fleet/runtime/__init__.py diff --git a/python/paddle/fleet/runtime/collective_runtime.py b/python/paddle/distributed/fleet/runtime/collective_runtime.py similarity index 100% rename from python/paddle/fleet/runtime/collective_runtime.py rename to python/paddle/distributed/fleet/runtime/collective_runtime.py diff --git a/python/paddle/fleet/runtime/parameter_server_runtime.py b/python/paddle/distributed/fleet/runtime/parameter_server_runtime.py similarity index 100% rename from python/paddle/fleet/runtime/parameter_server_runtime.py rename to python/paddle/distributed/fleet/runtime/parameter_server_runtime.py diff --git 
a/python/paddle/fleet/runtime/runtime_base.py b/python/paddle/distributed/fleet/runtime/runtime_base.py similarity index 100% rename from python/paddle/fleet/runtime/runtime_base.py rename to python/paddle/distributed/fleet/runtime/runtime_base.py diff --git a/python/paddle/fleet/utils/__init__.py b/python/paddle/distributed/fleet/utils/__init__.py similarity index 100% rename from python/paddle/fleet/utils/__init__.py rename to python/paddle/distributed/fleet/utils/__init__.py diff --git a/python/paddle/fleet/utils/fs.py b/python/paddle/distributed/fleet/utils/fs.py similarity index 100% rename from python/paddle/fleet/utils/fs.py rename to python/paddle/distributed/fleet/utils/fs.py diff --git a/python/paddle/fleet/utils/http_server.py b/python/paddle/distributed/fleet/utils/http_server.py similarity index 100% rename from python/paddle/fleet/utils/http_server.py rename to python/paddle/distributed/fleet/utils/http_server.py diff --git a/python/paddle/fluid/incubate/checkpoint/auto_checkpoint.py b/python/paddle/fluid/incubate/checkpoint/auto_checkpoint.py index 3016ca4a50d..ad51a043a0a 100644 --- a/python/paddle/fluid/incubate/checkpoint/auto_checkpoint.py +++ b/python/paddle/fluid/incubate/checkpoint/auto_checkpoint.py @@ -305,7 +305,7 @@ class TrainEpochRange(SerializableBase): if self._checker.ce_test: config = None - from paddle.fleet.utils.fs import HDFSClient + from paddle.distributed.fleet.utils.fs import HDFSClient self._hdfs = HDFSClient(self._checker.hdfs_home, config) self._cper = CheckpointSaver(self._hdfs) diff --git a/python/paddle/fluid/incubate/checkpoint/checkpoint_saver.py b/python/paddle/fluid/incubate/checkpoint/checkpoint_saver.py index 23c6f9f7b9c..08400ab13a2 100644 --- a/python/paddle/fluid/incubate/checkpoint/checkpoint_saver.py +++ b/python/paddle/fluid/incubate/checkpoint/checkpoint_saver.py @@ -79,7 +79,7 @@ class CheckpointSaver(object): tmp_path = "{}.tmp".format(real_path) saved_path = tmp_path - from paddle.fleet.utils.fs import LocalFS + from paddle.distributed.fleet.utils.fs import LocalFS local_fs = LocalFS() cache_path = None @@ -134,7 +134,7 @@ class CheckpointSaver(object): assert isinstance(checkpoint_no, int) assert checkpoint_no >= 0 - from paddle.fleet.utils.fs import LocalFS + from paddle.distributed.fleet.utils.fs import LocalFS local_fs = LocalFS() if self._fs.need_upload_download(): cache_path = "{}/{}.{}.load_cache".format( diff --git a/python/paddle/fluid/incubate/fleet/base/fleet_base.py b/python/paddle/fluid/incubate/fleet/base/fleet_base.py index f236a3e98c6..f885e51ef7f 100644 --- a/python/paddle/fluid/incubate/fleet/base/fleet_base.py +++ b/python/paddle/fluid/incubate/fleet/base/fleet_base.py @@ -21,7 +21,7 @@ from paddle.fluid.executor import Executor from paddle.fluid.optimizer import SGD from paddle.fluid.incubate.fleet.base.mode import Mode -from paddle.fleet.base.role_maker import RoleMakerBase +from paddle.distributed.fleet.base.role_maker import RoleMakerBase from paddle.fluid.contrib.mixed_precision.decorator import OptimizerWithMixedPrecision from . import mode diff --git a/python/paddle/fluid/reader.py b/python/paddle/fluid/reader.py index cd699d33254..7e633756fce 100644 --- a/python/paddle/fluid/reader.py +++ b/python/paddle/fluid/reader.py @@ -1709,7 +1709,7 @@ class PyReader(DataLoaderBase): class DatasetLoader(DataLoaderBase): def __init__(self, dataset, places, drop_last): - assert isinstance(dataset, paddle.fleet.dataset. + assert isinstance(dataset, paddle.distributed.fleet.dataset. 
DatasetBase), "dataset must be type of DatasetBase" assert not in_dygraph_mode( ), "DatasetLoader is not supported in dygraph mode yet" @@ -1725,7 +1725,7 @@ class DatasetLoader(DataLoaderBase): dataset.set_thread(thread_num) - if isinstance(dataset, paddle.fleet.dataset. + if isinstance(dataset, paddle.distributed.fleet.dataset. InMemoryDataset) and dataset.queue_num > thread_num: logging.warn("queue_num {} which is set in Dataset is ignored". format(dataset.queue_num)) diff --git a/python/paddle/fluid/tests/unittests/CMakeLists.txt b/python/paddle/fluid/tests/unittests/CMakeLists.txt old mode 100755 new mode 100644 diff --git a/python/paddle/fluid/tests/unittests/c_comm_init_op.py b/python/paddle/fluid/tests/unittests/c_comm_init_op.py index f32b52b251c..db77477cca6 100644 --- a/python/paddle/fluid/tests/unittests/c_comm_init_op.py +++ b/python/paddle/fluid/tests/unittests/c_comm_init_op.py @@ -18,7 +18,7 @@ import unittest import os import paddle.fluid.core as core import paddle.fluid as fluid -from paddle.fleet.base.private_helper_function import wait_server_ready +from paddle.distributed.fleet.base.private_helper_function import wait_server_ready class TestCCommInitOp(unittest.TestCase): diff --git a/python/paddle/fluid/tests/unittests/dist_fleet_ctr.py b/python/paddle/fluid/tests/unittests/dist_fleet_ctr.py index 033bc385005..cb0fd12c22b 100644 --- a/python/paddle/fluid/tests/unittests/dist_fleet_ctr.py +++ b/python/paddle/fluid/tests/unittests/dist_fleet_ctr.py @@ -28,7 +28,7 @@ import numpy as np import ctr_dataset_reader from test_dist_fleet_base import runtime_main, FleetDistRunnerBase -from paddle.fleet.base.util_factory import fleet_util +from paddle.distributed.fleet.base.util_factory import fleet_util # Fix seed for test fluid.default_startup_program().random_seed = 1 @@ -217,7 +217,7 @@ class TestDistCTR2x2(FleetDistRunnerBase): filelist.append(train_file_path) # config dataset - dataset = paddle.fleet.DatasetFactory().create_dataset() + dataset = paddle.distributed.fleet.DatasetFactory().create_dataset() dataset.set_batch_size(batch_size) dataset.set_use_var(self.feeds) pipe_command = 'python ctr_dataset_reader.py' diff --git a/python/paddle/fluid/tests/unittests/test_auto_checkpoint.py b/python/paddle/fluid/tests/unittests/test_auto_checkpoint.py index 7f02467737d..729fe20c8f8 100644 --- a/python/paddle/fluid/tests/unittests/test_auto_checkpoint.py +++ b/python/paddle/fluid/tests/unittests/test_auto_checkpoint.py @@ -20,7 +20,7 @@ from paddle.fluid.incubate.fleet.collective import CollectiveOptimizer, fleet import os import sys -from paddle.fleet.utils.fs import LocalFS, HDFSClient +from paddle.distributed.fleet.utils.fs import LocalFS, HDFSClient import paddle.fluid.incubate.checkpoint.auto_checkpoint as acp from paddle.fluid.incubate.checkpoint.checkpoint_saver import PaddleModel from paddle.fluid.framework import program_guard diff --git a/python/paddle/fluid/tests/unittests/test_auto_checkpoint2.py b/python/paddle/fluid/tests/unittests/test_auto_checkpoint2.py index f4bbb6d51cd..30a74351053 100644 --- a/python/paddle/fluid/tests/unittests/test_auto_checkpoint2.py +++ b/python/paddle/fluid/tests/unittests/test_auto_checkpoint2.py @@ -20,7 +20,7 @@ from paddle.fluid.incubate.fleet.collective import CollectiveOptimizer, fleet import os import sys -from paddle.fleet.utils.fs import LocalFS, HDFSClient +from paddle.distributed.fleet.utils.fs import LocalFS, HDFSClient import paddle.fluid.incubate.checkpoint.auto_checkpoint as acp from 
paddle.fluid.incubate.checkpoint.checkpoint_saver import PaddleModel from paddle.fluid.framework import program_guard diff --git a/python/paddle/fluid/tests/unittests/test_communicator_geo.py b/python/paddle/fluid/tests/unittests/test_communicator_geo.py index bff8908682d..b277047500f 100644 --- a/python/paddle/fluid/tests/unittests/test_communicator_geo.py +++ b/python/paddle/fluid/tests/unittests/test_communicator_geo.py @@ -26,7 +26,7 @@ import paddle import paddle.fluid as fluid import paddle.fluid.incubate.fleet.base.role_maker as role_maker -import paddle.fleet as fleet +import paddle.distributed.fleet as fleet class TestCommunicatorGeoEnd2End(unittest.TestCase): @@ -108,7 +108,7 @@ class TestCommunicatorGeoEnd2End(unittest.TestCase): role = role_maker.PaddleCloudRoleMaker() - strategy = paddle.fleet.DistributedStrategy() + strategy = paddle.distributed.fleet.DistributedStrategy() strategy.a_sync = True strategy.a_sync_configs = {"k_steps": 100} @@ -136,7 +136,7 @@ import paddle.fluid as fluid from paddle.fluid.communicator import Communicator import paddle.fluid.incubate.fleet.base.role_maker as role_maker from paddle.fluid.incubate.fleet.parameter_server.mode import DistributedMode -import paddle.fleet as fleet +import paddle.distributed.fleet as fleet from test_communicator_geo import TestCommunicatorGeoEnd2End diff --git a/python/paddle/fluid/tests/unittests/test_communicator_sync.py b/python/paddle/fluid/tests/unittests/test_communicator_sync.py index 7ac5c141779..e6db5c4d8c1 100644 --- a/python/paddle/fluid/tests/unittests/test_communicator_sync.py +++ b/python/paddle/fluid/tests/unittests/test_communicator_sync.py @@ -22,7 +22,7 @@ import paddle import paddle.fluid as fluid import paddle.fluid.incubate.fleet.base.role_maker as role_maker -import paddle.fleet as fleet +import paddle.distributed.fleet as fleet class TestCommunicator(unittest.TestCase): @@ -51,7 +51,7 @@ class TestCommunicator(unittest.TestCase): optimizer = fluid.optimizer.SGD(0.01) - strategy = paddle.fleet.DistributedStrategy() + strategy = paddle.distributed.fleet.DistributedStrategy() strategy.a_sync = False optimizer = fleet.distributed_optimizer(optimizer, strategy) diff --git a/python/paddle/fluid/tests/unittests/test_dataset.py b/python/paddle/fluid/tests/unittests/test_dataset.py index 90d5f585395..582bb3dcc68 100644 --- a/python/paddle/fluid/tests/unittests/test_dataset.py +++ b/python/paddle/fluid/tests/unittests/test_dataset.py @@ -38,25 +38,25 @@ class TestDataset(unittest.TestCase): def test_dataset_create(self): """ Testcase for dataset create. 
""" try: - dataset = paddle.fleet.DatasetFactory().create_dataset( + dataset = paddle.distributed.fleet.DatasetFactory().create_dataset( "InMemoryDataset") except: self.assertTrue(False) try: - dataset = paddle.fleet.DatasetFactory().create_dataset( + dataset = paddle.distributed.fleet.DatasetFactory().create_dataset( "QueueDataset") except: self.assertTrue(False) try: - dataset = paddle.fleet.DatasetFactory().create_dataset( + dataset = paddle.distributed.fleet.DatasetFactory().create_dataset( "FileInstantDataset") except: self.assertTrue(False) try: - dataset = paddle.fleet.DatasetFactory().create_dataset( + dataset = paddle.distributed.fleet.DatasetFactory().create_dataset( "MyOwnDataset") self.assertTrue(False) except: @@ -95,7 +95,7 @@ class TestDataset(unittest.TestCase): name=slot, shape=[1], dtype="int64", lod_level=1) slots_vars.append(var) - dataset = paddle.fleet.DatasetFactory().create_dataset( + dataset = paddle.distributed.fleet.DatasetFactory().create_dataset( "InMemoryDataset") dataset.set_batch_size(32) dataset.set_thread(3) @@ -176,7 +176,7 @@ class TestDataset(unittest.TestCase): name=slot, shape=[1], dtype="int64", lod_level=1) slots_vars.append(var) - dataset = paddle.fleet.DatasetFactory().create_dataset( + dataset = paddle.distributed.fleet.DatasetFactory().create_dataset( "InMemoryDataset") dataset.set_batch_size(32) dataset.set_thread(3) @@ -228,7 +228,7 @@ class TestDataset(unittest.TestCase): name=slot, shape=[1], dtype="int64", lod_level=1) slots_vars.append(var) - dataset = paddle.fleet.DatasetFactory().create_dataset( + dataset = paddle.distributed.fleet.DatasetFactory().create_dataset( "InMemoryDataset") dataset.set_batch_size(32) dataset.set_thread(3) @@ -300,7 +300,7 @@ class TestDataset(unittest.TestCase): name=slot, shape=[1], dtype="float32", lod_level=1) slots_vars.append(var) - dataset = paddle.fleet.DatasetFactory().create_dataset( + dataset = paddle.distributed.fleet.DatasetFactory().create_dataset( "InMemoryDataset") dataset.set_batch_size(32) dataset.set_thread(1) @@ -367,7 +367,7 @@ class TestDataset(unittest.TestCase): name="slot4", shape=[1], dtype="float32", lod_level=0) slots_vars = [var1, var2, var3, var4] - dataset = paddle.fleet.DatasetFactory().create_dataset( + dataset = paddle.distributed.fleet.DatasetFactory().create_dataset( "InMemoryDataset") dataset.set_batch_size(32) dataset.set_thread(1) @@ -423,7 +423,7 @@ class TestDataset(unittest.TestCase): name=slot, shape=[1], dtype="float32", lod_level=1) slots_vars.append(var) - dataset = paddle.fleet.DatasetFactory().create_dataset( + dataset = paddle.distributed.fleet.DatasetFactory().create_dataset( "InMemoryDataset") dataset.set_batch_size(32) dataset.set_thread(3) @@ -517,7 +517,8 @@ class TestDataset(unittest.TestCase): name=slot, shape=[1], dtype="int64", lod_level=1) slots_vars.append(var) - dataset = paddle.fleet.DatasetFactory().create_dataset("QueueDataset") + dataset = paddle.distributed.fleet.DatasetFactory().create_dataset( + "QueueDataset") dataset.set_batch_size(32) dataset.set_thread(3) dataset.set_filelist( @@ -542,7 +543,8 @@ class TestDataset(unittest.TestCase): except Exception as e: self.assertTrue(False) - dataset2 = paddle.fleet.DatasetFactory().create_dataset("QueueDataset") + dataset2 = paddle.distributed.fleet.DatasetFactory().create_dataset( + "QueueDataset") dataset2.set_use_var(slots_vars) dataset2.set_batch_size(32) dataset2.set_thread(3) @@ -583,7 +585,8 @@ class TestDataset(unittest.TestCase): name=slot, shape=[1], dtype="float32", lod_level=1) 
slots_vars.append(var) - dataset = paddle.fleet.DatasetFactory().create_dataset("QueueDataset") + dataset = paddle.distributed.fleet.DatasetFactory().create_dataset( + "QueueDataset") dataset.set_batch_size(32) dataset.set_thread(3) dataset.set_filelist( @@ -638,7 +641,7 @@ class TestDataset(unittest.TestCase): name=slot, shape=[None, 1], dtype="int64", lod_level=1) slots_vars.append(var) - dataset = paddle.fleet.DatasetFactory().create_dataset( + dataset = paddle.distributed.fleet.DatasetFactory().create_dataset( "InMemoryDataset") dataset.set_input_type(1) dataset.set_batch_size(1) @@ -718,7 +721,8 @@ class TestDatasetWithFetchHandler(unittest.TestCase): inputs(list): inputs of get_dataset files(list): files of get_dataset """ - dataset = paddle.fleet.DatasetFactory().create_dataset("QueueDataset") + dataset = paddle.distributed.fleet.DatasetFactory().create_dataset( + "QueueDataset") dataset.set_batch_size(32) dataset.set_thread(3) dataset.set_filelist(files) @@ -875,7 +879,7 @@ class TestDataset2(unittest.TestCase): except ImportError as e: print("warning: no mpi4py") exe.run(startup_program) - dataset = paddle.fleet.DatasetFactory().create_dataset( + dataset = paddle.distributed.fleet.DatasetFactory().create_dataset( "InMemoryDataset") dataset.set_batch_size(32) dataset.set_thread(3) @@ -945,7 +949,7 @@ class TestDataset2(unittest.TestCase): except ImportError as e: print("warning: no mpi4py") exe.run(startup_program) - dataset = paddle.fleet.DatasetFactory().create_dataset( + dataset = paddle.distributed.fleet.DatasetFactory().create_dataset( "InMemoryDataset") dataset.set_batch_size(32) dataset.set_thread(3) @@ -962,12 +966,12 @@ class TestDataset2(unittest.TestCase): print("warning: catch expected error") fleet._opt_info = None fleet._fleet_ptr = None - dataset = paddle.fleet.DatasetFactory().create_dataset( + dataset = paddle.distributed.fleet.DatasetFactory().create_dataset( "InMemoryDataset") dataset.set_rank_offset("") dataset.set_pv_batch_size(1) dataset.set_hdfs_config("", "") - d = paddle.fleet.DatasetBase() + d = paddle.distributed.fleet.DatasetBase() try: dataset.set_feed_type("MultiSlotInMemoryDataFeed") except: @@ -1000,7 +1004,7 @@ class TestDataset2(unittest.TestCase): dataset.get_pv_data_size() dataset.get_memory_data_size() dataset.get_shuffle_data_size() - dataset = paddle.fleet.DatasetFactory().create_dataset( + dataset = paddle.distributed.fleet.DatasetFactory().create_dataset( "QueueDataset") try: dataset.local_shuffle() @@ -1010,7 +1014,7 @@ class TestDataset2(unittest.TestCase): dataset.global_shuffle() except: print("warning: catch expected error") - dataset = paddle.fleet.FileInstantDataset() + dataset = paddle.distributed.fleet.FileInstantDataset() try: dataset.local_shuffle() except: diff --git a/python/paddle/fluid/tests/unittests/test_dataset_dataloader.py b/python/paddle/fluid/tests/unittests/test_dataset_dataloader.py index 22d59e78fff..c13c33f209f 100644 --- a/python/paddle/fluid/tests/unittests/test_dataset_dataloader.py +++ b/python/paddle/fluid/tests/unittests/test_dataset_dataloader.py @@ -97,7 +97,7 @@ class DatasetLoaderTestBase(unittest.TestCase): def check_batch_number(self, place, randomize_batch_num=False): main_prog, startup_prog, feeds = self.build_network() - dataset = paddle.fleet.DatasetFactory().create_dataset( + dataset = paddle.distributed.fleet.DatasetFactory().create_dataset( self.dataset_name) dataset.set_batch_size(BATCH_SIZE) diff --git a/python/paddle/fluid/tests/unittests/test_dist_fleet_a_sync_optimizer_async.py 
b/python/paddle/fluid/tests/unittests/test_dist_fleet_a_sync_optimizer_async.py index a7ef230f79a..28bd637726e 100644 --- a/python/paddle/fluid/tests/unittests/test_dist_fleet_a_sync_optimizer_async.py +++ b/python/paddle/fluid/tests/unittests/test_dist_fleet_a_sync_optimizer_async.py @@ -33,7 +33,7 @@ class TestFleetGradientMergeMetaOptimizer(unittest.TestCase): def test_a_sync_optimizer_trainer(self): os.environ["TRAINING_ROLE"] = "TRAINER" - import paddle.fleet as fleet + import paddle.distributed.fleet as fleet main_program = paddle.fluid.Program() startup_program = paddle.fluid.Program() @@ -53,7 +53,7 @@ class TestFleetGradientMergeMetaOptimizer(unittest.TestCase): input=prediction, label=input_y) avg_cost = paddle.fluid.layers.mean(x=cost) - strategy = paddle.fleet.DistributedStrategy() + strategy = paddle.distributed.fleet.DistributedStrategy() strategy.a_sync = True optimizer = paddle.optimizer.SGD(learning_rate=0.01) optimizer = fleet.distributed_optimizer(optimizer, strategy=strategy) @@ -78,7 +78,7 @@ class TestFleetGradientMergeMetaOptimizer(unittest.TestCase): def test_a_sync_optimizer_pserver(self): os.environ["TRAINING_ROLE"] = "PSERVER" - import paddle.fleet as fleet + import paddle.distributed.fleet as fleet main_program = paddle.fluid.Program() startup_program = paddle.fluid.Program() @@ -98,7 +98,7 @@ class TestFleetGradientMergeMetaOptimizer(unittest.TestCase): input=prediction, label=input_y) avg_cost = paddle.fluid.layers.mean(x=cost) - strategy = paddle.fleet.DistributedStrategy() + strategy = paddle.distributed.fleet.DistributedStrategy() strategy.a_sync = True optimizer = paddle.optimizer.SGD(learning_rate=0.01) optimizer = fleet.distributed_optimizer(optimizer, strategy=strategy) diff --git a/python/paddle/fluid/tests/unittests/test_dist_fleet_a_sync_optimizer_geo.py b/python/paddle/fluid/tests/unittests/test_dist_fleet_a_sync_optimizer_geo.py index 87ffeb31fd2..9cd35f1754f 100644 --- a/python/paddle/fluid/tests/unittests/test_dist_fleet_a_sync_optimizer_geo.py +++ b/python/paddle/fluid/tests/unittests/test_dist_fleet_a_sync_optimizer_geo.py @@ -32,7 +32,7 @@ class TestFleetGradientMergeMetaOptimizer(unittest.TestCase): def test_a_sync_optimizer_trainer(self): os.environ["TRAINING_ROLE"] = "TRAINER" - import paddle.fleet as fleet + import paddle.distributed.fleet as fleet main_program = paddle.fluid.Program() startup_program = paddle.fluid.Program() @@ -52,7 +52,7 @@ class TestFleetGradientMergeMetaOptimizer(unittest.TestCase): input=prediction, label=input_y) avg_cost = paddle.fluid.layers.mean(x=cost) - strategy = paddle.fleet.DistributedStrategy() + strategy = paddle.distributed.fleet.DistributedStrategy() strategy.a_sync = True strategy.a_sync_configs = {"k_steps": 100} optimizer = paddle.optimizer.SGD(learning_rate=0.01) @@ -75,7 +75,7 @@ class TestFleetGradientMergeMetaOptimizer(unittest.TestCase): def test_a_sync_optimizer_pserver(self): os.environ["TRAINING_ROLE"] = "PSERVER" - import paddle.fleet as fleet + import paddle.distributed.fleet as fleet main_program = paddle.fluid.Program() startup_program = paddle.fluid.Program() @@ -95,7 +95,7 @@ class TestFleetGradientMergeMetaOptimizer(unittest.TestCase): input=prediction, label=input_y) avg_cost = paddle.fluid.layers.mean(x=cost) - strategy = paddle.fleet.DistributedStrategy() + strategy = paddle.distributed.fleet.DistributedStrategy() strategy.a_sync = True strategy.a_sync_configs = {"k_steps": 100} optimizer = paddle.optimizer.SGD(learning_rate=0.01) diff --git 
a/python/paddle/fluid/tests/unittests/test_dist_fleet_a_sync_optimizer_sync.py b/python/paddle/fluid/tests/unittests/test_dist_fleet_a_sync_optimizer_sync.py index 6214588535f..c8130d62c30 100644 --- a/python/paddle/fluid/tests/unittests/test_dist_fleet_a_sync_optimizer_sync.py +++ b/python/paddle/fluid/tests/unittests/test_dist_fleet_a_sync_optimizer_sync.py @@ -15,7 +15,7 @@ import unittest import paddle import os -import paddle.fleet as fleet +import paddle.distributed.fleet as fleet import paddle.fluid.incubate.fleet.base.role_maker as role_maker import time @@ -45,7 +45,7 @@ class TestFleetGradientMergeMetaOptimizer(unittest.TestCase): input=prediction, label=input_y) avg_cost = paddle.fluid.layers.mean(x=cost) - strategy = paddle.fleet.DistributedStrategy() + strategy = paddle.distributed.fleet.DistributedStrategy() strategy.a_sync = False optimizer = paddle.optimizer.SGD(learning_rate=0.01) optimizer = fleet.distributed_optimizer(optimizer, strategy=strategy) diff --git a/python/paddle/fluid/tests/unittests/test_dist_fleet_base.py b/python/paddle/fluid/tests/unittests/test_dist_fleet_base.py index 8b2f7118ea7..f72850f9497 100644 --- a/python/paddle/fluid/tests/unittests/test_dist_fleet_base.py +++ b/python/paddle/fluid/tests/unittests/test_dist_fleet_base.py @@ -32,8 +32,8 @@ import tempfile import unittest import paddle.fluid as fluid -import paddle.fleet.base.role_maker as role_maker -from paddle.fleet.base.util_factory import fleet_util +import paddle.distributed.fleet.base.role_maker as role_maker +from paddle.distributed.fleet.base.util_factory import fleet_util from paddle.fluid.incubate.fleet.parameter_server.distribute_transpiler import fleet from paddle.fluid.incubate.fleet.parameter_server.distribute_transpiler.distributed_strategy import StrategyFactory diff --git a/python/paddle/fluid/tests/unittests/test_dist_fleet_ps2.py b/python/paddle/fluid/tests/unittests/test_dist_fleet_ps2.py index f6168008262..5fcf5d894b2 100644 --- a/python/paddle/fluid/tests/unittests/test_dist_fleet_ps2.py +++ b/python/paddle/fluid/tests/unittests/test_dist_fleet_ps2.py @@ -22,7 +22,7 @@ import shutil import paddle import paddle.fluid as fluid import paddle.fluid.incubate.fleet.base.role_maker as role_maker -import paddle.fleet as fleet +import paddle.distributed.fleet as fleet # For Net base_lr = 0.2 @@ -163,7 +163,7 @@ class TestPSPassWithBow(unittest.TestCase): fleet.init(role) loss, acc, _ = self.net() - strategy = paddle.fleet.DistributedStrategy() + strategy = paddle.distributed.fleet.DistributedStrategy() strategy.a_sync = True optimizer = paddle.optimizer.SGD(learning_rate=0.01) optimizer = fleet.distributed_optimizer(optimizer, strategy=strategy) diff --git a/python/paddle/fluid/tests/unittests/test_fleet_amp_meta_optimizer.py b/python/paddle/fluid/tests/unittests/test_fleet_amp_meta_optimizer.py index ae4b5d7ecd7..0e19069d5c0 100644 --- a/python/paddle/fluid/tests/unittests/test_fleet_amp_meta_optimizer.py +++ b/python/paddle/fluid/tests/unittests/test_fleet_amp_meta_optimizer.py @@ -23,7 +23,7 @@ class TestFleetAMPOptimizer(unittest.TestCase): os.environ["PADDLE_TRAINER_ENDPOINTS"] = "127.0.0.1:36001" def test_amp_optimizer(self): - import paddle.fleet as fleet + import paddle.distributed.fleet as fleet import paddle.fluid.incubate.fleet.base.role_maker as role_maker role = role_maker.PaddleCloudRoleMaker(is_collective=True) fleet.init(role) @@ -38,7 +38,7 @@ class TestFleetAMPOptimizer(unittest.TestCase): input=prediction, label=input_y) avg_cost = paddle.fluid.layers.mean(x=cost) 
-        strategy = paddle.fleet.DistributedStrategy()
+        strategy = paddle.distributed.fleet.DistributedStrategy()
         strategy.amp = True
         strategy.amp_configs = {
             "init_loss_scaling": 32768,
diff --git a/python/paddle/fluid/tests/unittests/test_fleet_base.py b/python/paddle/fluid/tests/unittests/test_fleet_base.py
index 20542da3f05..8019841f72e 100644
--- a/python/paddle/fluid/tests/unittests/test_fleet_base.py
+++ b/python/paddle/fluid/tests/unittests/test_fleet_base.py
@@ -26,13 +26,13 @@ class TestFleetBase(unittest.TestCase):
             "127.0.0.1:36001,127.0.0.2:36001"

     def test_init(self):
-        import paddle.fleet as fleet
+        import paddle.distributed.fleet as fleet
         import paddle.fluid.incubate.fleet.base.role_maker as role_maker
         role = role_maker.PaddleCloudRoleMaker(is_collective=True)
         fleet.init(role)

     def test_is_first_worker(self):
-        import paddle.fleet as fleet
+        import paddle.distributed.fleet as fleet
         import paddle.fluid.incubate.fleet.base.role_maker as role_maker
         role = role_maker.PaddleCloudRoleMaker(is_collective=True)
         fleet.init(role)
@@ -40,21 +40,21 @@ class TestFleetBase(unittest.TestCase):
         print("test fleet first worker done.")

     def test_worker_index(self):
-        import paddle.fleet as fleet
+        import paddle.distributed.fleet as fleet
         import paddle.fluid.incubate.fleet.base.role_maker as role_maker
         role = role_maker.PaddleCloudRoleMaker(is_collective=True)
         fleet.init(role)
         print(fleet.worker_index())

     def test_worker_num(self):
-        import paddle.fleet as fleet
+        import paddle.distributed.fleet as fleet
         import paddle.fluid.incubate.fleet.base.role_maker as role_maker
         role = role_maker.PaddleCloudRoleMaker(is_collective=True)
         fleet.init(role)
         print(fleet.worker_num())

     def test_is_worker(self):
-        import paddle.fleet as fleet
+        import paddle.distributed.fleet as fleet
         import paddle.fluid.incubate.fleet.base.role_maker as role_maker
         role = role_maker.PaddleCloudRoleMaker(is_collective=True)
         fleet.init(role)
@@ -62,14 +62,14 @@ class TestFleetBase(unittest.TestCase):
         print("test fleet is worker")

     def test_worker_endpoints(self):
-        import paddle.fleet as fleet
+        import paddle.distributed.fleet as fleet
         import paddle.fluid.incubate.fleet.base.role_maker as role_maker
         role = role_maker.PaddleCloudRoleMaker(is_collective=True)
         fleet.init(role)
         print(fleet.worker_endpoints(to_string=True))

     def test_server_num(self):
-        import paddle.fleet as fleet
+        import paddle.distributed.fleet as fleet
         import paddle.fluid.incubate.fleet.base.role_maker as role_maker
         role = role_maker.PaddleCloudRoleMaker(is_collective=True)
         fleet.init(role)
@@ -77,7 +77,7 @@ class TestFleetBase(unittest.TestCase):
         print("fleet server num: {}".format(fleet.server_num()))

     def test_server_index(self):
-        import paddle.fleet as fleet
+        import paddle.distributed.fleet as fleet
         import paddle.fluid.incubate.fleet.base.role_maker as role_maker
         role = role_maker.PaddleCloudRoleMaker(is_collective=True)
         fleet.init(role)
@@ -85,7 +85,7 @@ class TestFleetBase(unittest.TestCase):
         print("fleet server index: {}".format(fleet.server_index()))

     def test_server_endpoints(self):
-        import paddle.fleet as fleet
+        import paddle.distributed.fleet as fleet
         import paddle.fluid.incubate.fleet.base.role_maker as role_maker
         role = role_maker.PaddleCloudRoleMaker(is_collective=True)
         fleet.init(role)
@@ -94,7 +94,7 @@ class TestFleetBase(unittest.TestCase):
         print("fleet server endpoints: {}".format(
             fleet.server_endpoints(to_string=True)))

     def test_is_server(self):
-        import paddle.fleet as fleet
+        import paddle.distributed.fleet as fleet
         import paddle.fluid.incubate.fleet.base.role_maker as role_maker
         role = role_maker.PaddleCloudRoleMaker(is_collective=True)
         fleet.init(role)
@@ -102,14 +102,14 @@ class TestFleetBase(unittest.TestCase):
         print("test fleet is server")

     def test_util(self):
-        import paddle.fleet as fleet
+        import paddle.distributed.fleet as fleet
         import paddle.fluid.incubate.fleet.base.role_maker as role_maker
         role = role_maker.PaddleCloudRoleMaker(is_collective=True)
         fleet.init(role)
         self.assertEqual(fleet.util, None)

     def test_barrier_worker(self):
-        import paddle.fleet as fleet
+        import paddle.distributed.fleet as fleet
         import paddle.fluid.incubate.fleet.base.role_maker as role_maker
         role = role_maker.PaddleCloudRoleMaker(is_collective=True)
         fleet.init(role)
@@ -117,7 +117,7 @@ class TestFleetBase(unittest.TestCase):
         fleet.barrier_worker()

     def test_init_worker(self):
-        import paddle.fleet as fleet
+        import paddle.distributed.fleet as fleet
         import paddle.fluid.incubate.fleet.base.role_maker as role_maker
         role = role_maker.PaddleCloudRoleMaker(is_collective=True)
         fleet.init(role)
@@ -125,7 +125,7 @@ class TestFleetBase(unittest.TestCase):
         fleet.init_worker()

     def test_run_server(self):
-        import paddle.fleet as fleet
+        import paddle.distributed.fleet as fleet
         import paddle.fluid.incubate.fleet.base.role_maker as role_maker
         role = role_maker.PaddleCloudRoleMaker(is_collective=True)
         fleet.init(role)
@@ -133,7 +133,7 @@ class TestFleetBase(unittest.TestCase):
         fleet.run_worker()

     def test_stop_worker(self):
-        import paddle.fleet as fleet
+        import paddle.distributed.fleet as fleet
         import paddle.fluid.incubate.fleet.base.role_maker as role_maker
         role = role_maker.PaddleCloudRoleMaker(is_collective=True)
         fleet.init(role)
@@ -141,7 +141,7 @@ class TestFleetBase(unittest.TestCase):
         fleet.stop_worker()

     def test_distributed_optimizer(self):
-        import paddle.fleet as fleet
+        import paddle.distributed.fleet as fleet
         import paddle.fluid.incubate.fleet.base.role_maker as role_maker
         role = role_maker.PaddleCloudRoleMaker(is_collective=True)
         fleet.init(role)
@@ -151,7 +151,7 @@ class TestFleetBase(unittest.TestCase):

     def test_minimize(self):
         import paddle
-        import paddle.fleet as fleet
+        import paddle.distributed.fleet as fleet
         import paddle.fluid.incubate.fleet.base.role_maker as role_maker

         input_x = paddle.fluid.layers.data(
diff --git a/python/paddle/fluid/tests/unittests/test_fleet_dgc_meta_optimizer.py b/python/paddle/fluid/tests/unittests/test_fleet_dgc_meta_optimizer.py
index b0a841ecba7..1d211a77008 100755
--- a/python/paddle/fluid/tests/unittests/test_fleet_dgc_meta_optimizer.py
+++ b/python/paddle/fluid/tests/unittests/test_fleet_dgc_meta_optimizer.py
@@ -16,7 +16,7 @@ import unittest
 import paddle
 from paddle import fluid
 import os
-import paddle.fleet as fleet
+import paddle.distributed.fleet as fleet
 import paddle.fluid.incubate.fleet.base.role_maker as role_maker


@@ -47,7 +47,7 @@ class TestFleetDGCOptimizer(unittest.TestCase):
             input=prediction, label=input_y)
         avg_cost = paddle.fluid.layers.mean(x=cost)

-        strategy = paddle.fleet.DistributedStrategy()
+        strategy = paddle.distributed.fleet.DistributedStrategy()
         strategy.dgc = True
         strategy.dgc_configs = {
             "rampup_begin_step": 128,
diff --git a/python/paddle/fluid/tests/unittests/test_fleet_distributed_strategy.py b/python/paddle/fluid/tests/unittests/test_fleet_distributed_strategy.py
index aeac7f51a24..45dd461237b 100644
--- a/python/paddle/fluid/tests/unittests/test_fleet_distributed_strategy.py
+++ b/python/paddle/fluid/tests/unittests/test_fleet_distributed_strategy.py
@@ -19,7 +19,7 @@ import os

 class TestStrategyConfig(unittest.TestCase):
     def test_amp(self):
-        strategy = paddle.fleet.DistributedStrategy()
+        strategy = paddle.distributed.fleet.DistributedStrategy()
         strategy.amp = True
         self.assertEqual(strategy.amp, True)
         strategy.amp = False
@@ -28,7 +28,7 @@ class TestStrategyConfig(unittest.TestCase):
         self.assertEqual(strategy.amp, False)

     def test_amp_configs(self):
-        strategy = paddle.fleet.DistributedStrategy()
+        strategy = paddle.distributed.fleet.DistributedStrategy()
         configs = {
             "init_loss_scaling": 32768,
             "decr_every_n_nan_or_inf": 2,
@@ -41,7 +41,7 @@ class TestStrategyConfig(unittest.TestCase):
         self.assertEqual(strategy.amp_configs["init_loss_scaling"], 32768)

     def test_recompute(self):
-        strategy = paddle.fleet.DistributedStrategy()
+        strategy = paddle.distributed.fleet.DistributedStrategy()
         strategy.recompute = True
         self.assertEqual(strategy.recompute, True)
         strategy.recompute = False
@@ -50,13 +50,13 @@ class TestStrategyConfig(unittest.TestCase):
         self.assertEqual(strategy.recompute, False)

     def test_recompute_configs(self):
-        strategy = paddle.fleet.DistributedStrategy()
+        strategy = paddle.distributed.fleet.DistributedStrategy()
         configs = {"checkpoints": ["x", "y"]}
         strategy.recompute_configs = configs
         self.assertEqual(len(strategy.recompute_configs["checkpoints"]), 2)

     def test_pipeline(self):
-        strategy = paddle.fleet.DistributedStrategy()
+        strategy = paddle.distributed.fleet.DistributedStrategy()
         strategy.pipeline = True
         self.assertEqual(strategy.pipeline, True)
         strategy.pipeline = False
@@ -65,13 +65,13 @@ class TestStrategyConfig(unittest.TestCase):
         self.assertEqual(strategy.pipeline, False)

     def test_pipeline_configs(self):
-        strategy = paddle.fleet.DistributedStrategy()
+        strategy = paddle.distributed.fleet.DistributedStrategy()
         configs = {"micro_batch": 4}
         strategy.pipeline_configs = configs
         self.assertEqual(strategy.pipeline_configs["micro_batch"], 4)

     def test_localsgd(self):
-        strategy = paddle.fleet.DistributedStrategy()
+        strategy = paddle.distributed.fleet.DistributedStrategy()
         strategy.localsgd = True
         self.assertEqual(strategy.localsgd, True)
         strategy.localsgd = False
@@ -80,13 +80,13 @@ class TestStrategyConfig(unittest.TestCase):
         self.assertEqual(strategy.localsgd, False)

     def test_localsgd_configs(self):
-        strategy = paddle.fleet.DistributedStrategy()
+        strategy = paddle.distributed.fleet.DistributedStrategy()
         configs = {"k_steps": 4}
         strategy.localsgd_configs = configs
         self.assertEqual(strategy.localsgd_configs["k_steps"], 4)

     def test_dgc(self):
-        strategy = paddle.fleet.DistributedStrategy()
+        strategy = paddle.distributed.fleet.DistributedStrategy()
         strategy.dgc = True
         self.assertEqual(strategy.dgc, True)
         strategy.dgc = False
@@ -95,7 +95,7 @@ class TestStrategyConfig(unittest.TestCase):
         self.assertEqual(strategy.dgc, False)

     def test_sync_nccl_allreduce(self):
-        strategy = paddle.fleet.DistributedStrategy()
+        strategy = paddle.distributed.fleet.DistributedStrategy()
         strategy.sync_nccl_allreduce = True
         self.assertEqual(strategy.sync_nccl_allreduce, True)
         strategy.sync_nccl_allreduce = False
@@ -104,14 +104,14 @@ class TestStrategyConfig(unittest.TestCase):
         self.assertEqual(strategy.sync_nccl_allreduce, False)

     def test_nccl_comm_num(self):
-        strategy = paddle.fleet.DistributedStrategy()
+        strategy = paddle.distributed.fleet.DistributedStrategy()
         strategy.nccl_comm_num = 1
         self.assertEqual(strategy.nccl_comm_num, 1)
         strategy.nccl_comm_num = "2"
         self.assertEqual(strategy.nccl_comm_num, 1)

     def test_use_hierarchical_allreduce(self):
-        strategy = paddle.fleet.DistributedStrategy()
+        strategy = paddle.distributed.fleet.DistributedStrategy()
         strategy.use_hierarchical_allreduce = True
         self.assertEqual(strategy.use_hierarchical_allreduce, True)
         strategy.use_hierarchical_allreduce = False
@@ -120,14 +120,14 @@ class TestStrategyConfig(unittest.TestCase):
         self.assertEqual(strategy.use_hierarchical_allreduce, False)

     def test_hierarchical_allreduce_inter_nranks(self):
-        strategy = paddle.fleet.DistributedStrategy()
+        strategy = paddle.distributed.fleet.DistributedStrategy()
         strategy.hierarchical_allreduce_inter_nranks = 8
         self.assertEqual(strategy.hierarchical_allreduce_inter_nranks, 8)
         strategy.hierarchical_allreduce_inter_nranks = "4"
         self.assertEqual(strategy.hierarchical_allreduce_inter_nranks, 8)

     def test_sync_batch_norm(self):
-        strategy = paddle.fleet.DistributedStrategy()
+        strategy = paddle.distributed.fleet.DistributedStrategy()
         strategy.sync_batch_norm = True
         self.assertEqual(strategy.sync_batch_norm, True)
         strategy.sync_batch_norm = False
@@ -136,7 +136,7 @@ class TestStrategyConfig(unittest.TestCase):
         self.assertEqual(strategy.sync_batch_norm, False)

     def test_fuse_all_reduce_ops(self):
-        strategy = paddle.fleet.DistributedStrategy()
+        strategy = paddle.distributed.fleet.DistributedStrategy()
         strategy.fuse_all_reduce_ops = True
         self.assertEqual(strategy.fuse_all_reduce_ops, True)
         strategy.fuse_all_reduce_ops = False
@@ -145,21 +145,21 @@ class TestStrategyConfig(unittest.TestCase):
         self.assertEqual(strategy.fuse_all_reduce_ops, False)

     def test_fuse_grad_size_in_MB(self):
-        strategy = paddle.fleet.DistributedStrategy()
+        strategy = paddle.distributed.fleet.DistributedStrategy()
         strategy.fuse_grad_size_in_MB = 50
         self.assertEqual(strategy.fuse_grad_size_in_MB, 50)
         strategy.fuse_grad_size_in_MB = "40"
         self.assertEqual(strategy.fuse_grad_size_in_MB, 50)

     def test_fuse_grad_size_in_TFLOPS(self):
-        strategy = paddle.fleet.DistributedStrategy()
+        strategy = paddle.distributed.fleet.DistributedStrategy()
         strategy._fuse_grad_size_in_TFLOPS = 0.1
         self.assertGreater(strategy._fuse_grad_size_in_TFLOPS, 0.09)
         strategy._fuse_grad_size_in_TFLOPS = "0.3"
         self.assertGreater(strategy._fuse_grad_size_in_TFLOPS, 0.09)

     def test_gradient_merge(self):
-        strategy = paddle.fleet.DistributedStrategy()
+        strategy = paddle.distributed.fleet.DistributedStrategy()
         strategy.gradient_merge = True
         self.assertEqual(strategy.gradient_merge, True)
         strategy.gradient_merge = False
@@ -168,13 +168,13 @@ class TestStrategyConfig(unittest.TestCase):
         self.assertEqual(strategy.gradient_merge, False)

     def test_gradient_merge_configs(self):
-        strategy = paddle.fleet.DistributedStrategy()
+        strategy = paddle.distributed.fleet.DistributedStrategy()
         configs = {"k_steps": 4}
         strategy.gradient_merge_configs = configs
         self.assertEqual(strategy.gradient_merge_configs["k_steps"], 4)

     def test_lars(self):
-        strategy = paddle.fleet.DistributedStrategy()
+        strategy = paddle.distributed.fleet.DistributedStrategy()
         strategy.lars = True
         self.assertEqual(strategy.lars, True)
         strategy.lars = False
@@ -183,7 +183,7 @@ class TestStrategyConfig(unittest.TestCase):
         self.assertEqual(strategy.lars, False)

     def test_lamb(self):
-        strategy = paddle.fleet.DistributedStrategy()
+        strategy = paddle.distributed.fleet.DistributedStrategy()
         strategy.lamb = True
         self.assertEqual(strategy.lamb, True)
         strategy.lamb = False
@@ -192,7 +192,7 @@ class TestStrategyConfig(unittest.TestCase):
         self.assertEqual(strategy.lamb, False)

     def test_a_sync(self):
-        strategy = paddle.fleet.DistributedStrategy()
+        strategy = paddle.distributed.fleet.DistributedStrategy()
         strategy.a_sync = True
         self.assertEqual(strategy.a_sync, True)
         strategy.a_sync = False
@@ -202,13 +202,13 @@ class TestStrategyConfig(unittest.TestCase):
         strategy.a_sync = "True"

     def test_a_sync_configs(self):
-        strategy = paddle.fleet.DistributedStrategy()
+        strategy = paddle.distributed.fleet.DistributedStrategy()
         configs = {"k_steps": 1000}
         strategy.a_sync_configs = configs
         self.assertEqual(strategy.a_sync_configs["k_steps"], 1000)

     def test_elastic(self):
-        strategy = paddle.fleet.DistributedStrategy()
+        strategy = paddle.distributed.fleet.DistributedStrategy()
         strategy.elastic = True
         self.assertEqual(strategy.elastic, True)
         strategy.elastic = False
@@ -217,7 +217,7 @@ class TestStrategyConfig(unittest.TestCase):
         self.assertEqual(strategy.elastic, False)

     def test_auto(self):
-        strategy = paddle.fleet.DistributedStrategy()
+        strategy = paddle.distributed.fleet.DistributedStrategy()
         strategy.auto = True
         self.assertEqual(strategy.auto, True)
         strategy.auto = False
@@ -226,7 +226,7 @@ class TestStrategyConfig(unittest.TestCase):
         self.assertEqual(strategy.auto, False)

     def test_strategy_prototxt(self):
-        strategy = paddle.fleet.DistributedStrategy()
+        strategy = paddle.distributed.fleet.DistributedStrategy()
         strategy.a_sync = True
         strategy.localsgd = True
         strategy.dgc = True
@@ -255,7 +255,7 @@ class TestStrategyConfig(unittest.TestCase):
         exe_strategy.num_iteration_per_run = 10
         strategy.execution_strategy = exe_strategy
         strategy.save_to_prototxt("dist_strategy.prototxt")
-        strategy2 = paddle.fleet.DistributedStrategy()
+        strategy2 = paddle.distributed.fleet.DistributedStrategy()
         strategy2.load_from_prototxt("dist_strategy.prototxt")

         self.assertEqual(strategy.dgc, strategy2.dgc)
@@ -277,7 +277,7 @@ class TestStrategyConfig(unittest.TestCase):
         build_strategy.enable_backward_optimizer_op_deps = True
         build_strategy.trainers_endpoints = ["1", "2"]

-        strategy = paddle.fleet.DistributedStrategy()
+        strategy = paddle.distributed.fleet.DistributedStrategy()
         strategy.build_strategy = build_strategy

     def test_execution_strategy(self):
@@ -286,7 +286,7 @@ class TestStrategyConfig(unittest.TestCase):
         exe_strategy.num_iteration_per_drop_scope = 10
         exe_strategy.num_iteration_per_run = 10

-        strategy = paddle.fleet.DistributedStrategy()
+        strategy = paddle.distributed.fleet.DistributedStrategy()
         strategy.execution_strategy = exe_strategy
diff --git a/python/paddle/fluid/tests/unittests/test_fleet_gradient_merge_meta_optimizer.py b/python/paddle/fluid/tests/unittests/test_fleet_gradient_merge_meta_optimizer.py
index 36d5912cb7e..49ce09877f0 100644
--- a/python/paddle/fluid/tests/unittests/test_fleet_gradient_merge_meta_optimizer.py
+++ b/python/paddle/fluid/tests/unittests/test_fleet_gradient_merge_meta_optimizer.py
@@ -15,7 +15,7 @@
 import unittest
 import paddle
 import os
-import paddle.fleet as fleet
+import paddle.distributed.fleet as fleet
 import paddle.fluid.incubate.fleet.base.role_maker as role_maker


@@ -41,7 +41,7 @@ class TestFleetGradientMergeMetaOptimizer(unittest.TestCase):
             input=prediction, label=input_y)
         avg_cost = paddle.fluid.layers.mean(x=cost)

-        strategy = paddle.fleet.DistributedStrategy()
+        strategy = paddle.distributed.fleet.DistributedStrategy()
         strategy.gradient_merge = True
         strategy.gradient_merge_configs = {"k_steps": 2, "avg": True}
         optimizer = paddle.optimizer.SGD(learning_rate=0.01)
diff --git a/python/paddle/fluid/tests/unittests/test_fleet_graph_execution_meta_optimizer.py b/python/paddle/fluid/tests/unittests/test_fleet_graph_execution_meta_optimizer.py
index 7998b1fa5d1..3e97ab3bfc6 100644
--- a/python/paddle/fluid/tests/unittests/test_fleet_graph_execution_meta_optimizer.py
+++ b/python/paddle/fluid/tests/unittests/test_fleet_graph_execution_meta_optimizer.py
@@ -39,7 +39,7 @@ class TestFleetGraphExecutionMetaOptimizer(unittest.TestCase):
         }

         def node_func():
-            import paddle.fleet as fleet
+            import paddle.distributed.fleet as fleet
             import paddle.fluid.incubate.fleet.base.role_maker as role_maker
             role = role_maker.PaddleCloudRoleMaker(is_collective=True)
             fleet.init(role)
@@ -57,7 +57,7 @@ class TestFleetGraphExecutionMetaOptimizer(unittest.TestCase):
                 input=prediction, label=input_y)
             avg_cost = paddle.fluid.layers.mean(x=cost)

-            strategy = paddle.fleet.DistributedStrategy()
+            strategy = paddle.distributed.fleet.DistributedStrategy()
             optimizer = paddle.optimizer.SGD(learning_rate=0.01)
             optimizer = fleet.distributed_optimizer(
                 optimizer, strategy=strategy)
@@ -90,7 +90,7 @@ class TestFleetGraphExecutionMetaOptimizer(unittest.TestCase):
         }

         def node_func():
-            import paddle.fleet as fleet
+            import paddle.distributed.fleet as fleet
             import paddle.fluid.incubate.fleet.base.role_maker as role_maker
             role = role_maker.PaddleCloudRoleMaker(is_collective=True)
             fleet.init(role)
@@ -108,7 +108,7 @@ class TestFleetGraphExecutionMetaOptimizer(unittest.TestCase):
                 input=prediction, label=input_y)
             avg_cost = paddle.fluid.layers.mean(x=cost)

-            strategy = paddle.fleet.DistributedStrategy()
+            strategy = paddle.distributed.fleet.DistributedStrategy()
             strategy.nccl_comm_num = 2
             strategy.sync_nccl_allreduce = True
             optimizer = paddle.optimizer.SGD(learning_rate=0.01)
diff --git a/python/paddle/fluid/tests/unittests/test_fleet_graph_executor.py b/python/paddle/fluid/tests/unittests/test_fleet_graph_executor.py
index 47e8949922a..d2e0112ba29 100644
--- a/python/paddle/fluid/tests/unittests/test_fleet_graph_executor.py
+++ b/python/paddle/fluid/tests/unittests/test_fleet_graph_executor.py
@@ -39,7 +39,7 @@ class TestFleetGraphExecutionMetaOptimizer(unittest.TestCase):
         }

         def node_func():
-            import paddle.fleet as fleet
+            import paddle.distributed.fleet as fleet
             import paddle.fluid.incubate.fleet.base.role_maker as role_maker
             role = role_maker.PaddleCloudRoleMaker(is_collective=True)
             fleet.init(role)
@@ -57,7 +57,7 @@ class TestFleetGraphExecutionMetaOptimizer(unittest.TestCase):
                 input=prediction, label=input_y)
             avg_cost = paddle.fluid.layers.mean(x=cost)

-            strategy = paddle.fleet.DistributedStrategy()
+            strategy = paddle.distributed.fleet.DistributedStrategy()
             strategy.nccl_comm_num = 2
             strategy.sync_nccl_allreduce = True
             optimizer = paddle.optimizer.SGD(learning_rate=0.01)
diff --git a/python/paddle/fluid/tests/unittests/test_fleet_lamb_meta_optimizer.py b/python/paddle/fluid/tests/unittests/test_fleet_lamb_meta_optimizer.py
index 7384e3b4dfa..8ad051924f2 100755
--- a/python/paddle/fluid/tests/unittests/test_fleet_lamb_meta_optimizer.py
+++ b/python/paddle/fluid/tests/unittests/test_fleet_lamb_meta_optimizer.py
@@ -16,7 +16,7 @@ import unittest
 import paddle
 from paddle import fluid
 import os
-import paddle.fleet as fleet
+import paddle.distributed.fleet as fleet
 import paddle.fluid.incubate.fleet.base.role_maker as role_maker


@@ -47,7 +47,7 @@ class TestFleetLambMetaOptimizer(unittest.TestCase):
             input=prediction, label=input_y)
         avg_cost = paddle.fluid.layers.mean(x=cost)

-        strategy = paddle.fleet.DistributedStrategy()
+        strategy = paddle.distributed.fleet.DistributedStrategy()
         strategy.lamb = True
         strategy.lamb_configs = {
             'lamb_weight_decay': 0.01,
diff --git a/python/paddle/fluid/tests/unittests/test_fleet_lars_meta_optimizer.py b/python/paddle/fluid/tests/unittests/test_fleet_lars_meta_optimizer.py
index d8a56016ff3..87c4823693e 100755
--- a/python/paddle/fluid/tests/unittests/test_fleet_lars_meta_optimizer.py
+++ b/python/paddle/fluid/tests/unittests/test_fleet_lars_meta_optimizer.py
@@ -16,7 +16,7 @@ import unittest
 import paddle
 from paddle import fluid
 import os
-import paddle.fleet as fleet
+import paddle.distributed.fleet as fleet
 import paddle.fluid.incubate.fleet.base.role_maker as role_maker


@@ -47,7 +47,7 @@ class TestFleetLarsMetaOptimizer(unittest.TestCase):
             input=prediction, label=input_y)
         avg_cost = paddle.fluid.layers.mean(x=cost)

-        strategy = paddle.fleet.DistributedStrategy()
+        strategy = paddle.distributed.fleet.DistributedStrategy()
         strategy.lars = True
         strategy.lars_configs = {
             "lars_coeff": 0.001,
diff --git a/python/paddle/fluid/tests/unittests/test_fleet_localsgd_meta_optimizer.py b/python/paddle/fluid/tests/unittests/test_fleet_localsgd_meta_optimizer.py
index 1f2ceb298e7..f4bb8704849 100644
--- a/python/paddle/fluid/tests/unittests/test_fleet_localsgd_meta_optimizer.py
+++ b/python/paddle/fluid/tests/unittests/test_fleet_localsgd_meta_optimizer.py
@@ -16,7 +16,7 @@
 import unittest
 import paddle
 import os
-import paddle.fleet as fleet
+import paddle.distributed.fleet as fleet
 import paddle.fluid.incubate.fleet.base.role_maker as role_maker


@@ -39,7 +39,7 @@ class TestFleetLocalSGDMetaOptimizer(unittest.TestCase):
             input=prediction, label=input_y)
         avg_cost = paddle.fluid.layers.mean(x=cost)

-        strategy = paddle.fleet.DistributedStrategy()
+        strategy = paddle.distributed.fleet.DistributedStrategy()
         strategy.localsgd = True
         strategy.auto = True
         config = strategy.localsgd_configs
diff --git a/python/paddle/fluid/tests/unittests/test_fleet_metric.py b/python/paddle/fluid/tests/unittests/test_fleet_metric.py
index 2dacc02797a..6a7963f4382 100644
--- a/python/paddle/fluid/tests/unittests/test_fleet_metric.py
+++ b/python/paddle/fluid/tests/unittests/test_fleet_metric.py
@@ -19,7 +19,7 @@ import paddle
 import paddle.fluid as fluid
 import os
 import unittest
-import paddle.fleet.metrics.metric as metric
+import paddle.distributed.fleet.metrics.metric as metric
 from paddle.fluid.incubate.fleet.parameter_server.distribute_transpiler import fleet

diff --git a/python/paddle/fluid/tests/unittests/test_fleet_pipeline_meta_optimizer.py b/python/paddle/fluid/tests/unittests/test_fleet_pipeline_meta_optimizer.py
index 3629eb7f227..d35f2fe5e62 100644
--- a/python/paddle/fluid/tests/unittests/test_fleet_pipeline_meta_optimizer.py
+++ b/python/paddle/fluid/tests/unittests/test_fleet_pipeline_meta_optimizer.py
@@ -24,7 +24,7 @@ class TestFleetMetaOptimizer(unittest.TestCase):
             "PADDLE_TRAINER_ENDPOINTS"] = "127.0.0.1:36001,127.0.0.1:36002"

     def test_pipeline_optimizer(self):
-        import paddle.fleet as fleet
+        import paddle.distributed.fleet as fleet
         import paddle.fluid.incubate.fleet.base.role_maker as role_maker
         role = role_maker.PaddleCloudRoleMaker(is_collective=True)
         fleet.init(role)
@@ -49,7 +49,7 @@ class TestFleetMetaOptimizer(unittest.TestCase):
             input=prediction, label=input_y)
         avg_cost = paddle.fluid.layers.mean(x=cost)

-        strategy = paddle.fleet.DistributedStrategy()
+        strategy = paddle.distributed.fleet.DistributedStrategy()
         strategy.pipeline = True
         strategy.pipeline_configs = {'micro_batch': 2}

diff --git a/python/paddle/fluid/tests/unittests/test_fleet_private_function.py b/python/paddle/fluid/tests/unittests/test_fleet_private_function.py
index ec99acf1098..beec6d7f51c 100644
--- a/python/paddle/fluid/tests/unittests/test_fleet_private_function.py
+++ b/python/paddle/fluid/tests/unittests/test_fleet_private_function.py
@@ -36,7 +36,7 @@ class TestFleetPrivateFunction(unittest.TestCase):
         thr = threading.Thread(target=init_server, args=(9292, ))
         thr.start()

-        import paddle.fleet as fleet
+        import paddle.distributed.fleet as fleet
         ep = ["127.0.0.1:9292"]
         fleet.base.private_helper_function.wait_server_ready(ep)

diff --git a/python/paddle/fluid/tests/unittests/test_fleet_recompute_meta_optimizer.py b/python/paddle/fluid/tests/unittests/test_fleet_recompute_meta_optimizer.py
index f62c8d32d6c..f07c6421192 100644
--- a/python/paddle/fluid/tests/unittests/test_fleet_recompute_meta_optimizer.py
+++ b/python/paddle/fluid/tests/unittests/test_fleet_recompute_meta_optimizer.py
@@ -26,7 +26,7 @@ class TestFleetRecomputeMetaOptimizer(unittest.TestCase):
             "127.0.0.1:36001,127.0.0.2:36001"

     def test_recompute_optimizer(self):
-        import paddle.fleet as fleet
+        import paddle.distributed.fleet as fleet
         import paddle.fluid.incubate.fleet.base.role_maker as role_maker
         role = role_maker.PaddleCloudRoleMaker(is_collective=True)
         fleet.init(role)
@@ -41,7 +41,7 @@ class TestFleetRecomputeMetaOptimizer(unittest.TestCase):
             input=prediction, label=input_y)
         avg_cost = paddle.fluid.layers.mean(x=cost)

-        strategy = paddle.fleet.DistributedStrategy()
+        strategy = paddle.distributed.fleet.DistributedStrategy()
         strategy.recompute = True
         strategy.recompute_configs = {"checkpoints": ["fc2"]}

diff --git a/python/paddle/fluid/tests/unittests/test_fleet_rolemaker_2.py b/python/paddle/fluid/tests/unittests/test_fleet_rolemaker_2.py
index 351dc0a5d0f..eb5d9eb6660 100644
--- a/python/paddle/fluid/tests/unittests/test_fleet_rolemaker_2.py
+++ b/python/paddle/fluid/tests/unittests/test_fleet_rolemaker_2.py
@@ -163,7 +163,7 @@ class TestCloudRoleMaker2(unittest.TestCase):
             data = "1 1 1 1\n"
             f.write(data)

-        dataset = paddle.fleet.DatasetFactory().create_dataset(
+        dataset = paddle.distributed.fleet.DatasetFactory().create_dataset(
             "InMemoryDataset")
         dataset.set_filelist(["test_fleet_gloo_role_maker_1.txt"])
         dataset.set_use_var([show, label])
diff --git a/python/paddle/fluid/tests/unittests/test_fleet_rolemaker_4.py b/python/paddle/fluid/tests/unittests/test_fleet_rolemaker_4.py
index a91f6cbd69e..6414ef18d63 100644
--- a/python/paddle/fluid/tests/unittests/test_fleet_rolemaker_4.py
+++ b/python/paddle/fluid/tests/unittests/test_fleet_rolemaker_4.py
@@ -40,9 +40,9 @@ class TestCloudRoleMaker(unittest.TestCase):
             from paddle.fluid.incubate.fleet.parameter_server.pslib import PSLib
             from paddle.fluid.incubate.fleet.base.role_maker import \
                 GeneralRoleMaker
-            from paddle.fleet.utils import KVHandler
-            from paddle.fleet.utils import KVServer
-            from paddle.fleet.utils import KVHTTPServer
+            from paddle.distributed.fleet.utils import KVHandler
+            from paddle.distributed.fleet.utils import KVServer
+            from paddle.distributed.fleet.utils import KVHTTPServer
         except:
             print("warning: no fleet, skip test_pslib_4")
             return
diff --git a/python/paddle/fluid/tests/unittests/test_fleet_rolemaker_new.py b/python/paddle/fluid/tests/unittests/test_fleet_rolemaker_new.py
index 659cc34b549..f80d45ed5e0 100644
--- a/python/paddle/fluid/tests/unittests/test_fleet_rolemaker_new.py
+++ b/python/paddle/fluid/tests/unittests/test_fleet_rolemaker_new.py
@@ -16,7 +16,7 @@
 from __future__ import print_function
 import os
 import unittest
-import paddle.fleet.base.role_maker as role_maker
+import paddle.distributed.fleet.base.role_maker as role_maker


 class TestRoleMakerBase(unittest.TestCase):
diff --git a/python/paddle/fluid/tests/unittests/test_fleet_runtime.py b/python/paddle/fluid/tests/unittests/test_fleet_runtime.py
index 474e5da1c21..3fd646f4340 100644
--- a/python/paddle/fluid/tests/unittests/test_fleet_runtime.py
+++ b/python/paddle/fluid/tests/unittests/test_fleet_runtime.py
@@ -19,16 +19,17 @@ import os

 class TestFleetRuntime(unittest.TestCase):
     def test_fleet_runtime_base(self):
-        import paddle.fleet.runtime
-        base = paddle.fleet.runtime.runtime_base.RuntimeBase()
+        import paddle.distributed.fleet.runtime
+        base = paddle.distributed.fleet.runtime.runtime_base.RuntimeBase()
         base._run_worker()
         base._init_server()
         base._run_server()
         base._stop_worker()

     def test_fleet_collective_runtime(self):
-        import paddle.fleet.runtime
-        collective_runtime = paddle.fleet.runtime.CollectiveRuntime()
+        import paddle.distributed.fleet.runtime
+        collective_runtime = paddle.distributed.fleet.runtime.CollectiveRuntime(
+        )
         collective_runtime._init_worker()
         collective_runtime._run_worker()
         collective_runtime._init_worker()
diff --git a/python/paddle/fluid/tests/unittests/test_fleet_util.py b/python/paddle/fluid/tests/unittests/test_fleet_util.py
index e52cb5f920c..8dbf97b1123 100644
--- a/python/paddle/fluid/tests/unittests/test_fleet_util.py
+++ b/python/paddle/fluid/tests/unittests/test_fleet_util.py
@@ -22,8 +22,8 @@ import tempfile
 import os
 import sys
 from paddle.dataset.common import download, DATA_HOME
-from paddle.fleet.base.util_factory import fleet_util
-import paddle.fleet.base.role_maker as role_maker
+from paddle.distributed.fleet.base.util_factory import fleet_util
+import paddle.distributed.fleet.base.role_maker as role_maker


 class TestFleetUtil(unittest.TestCase):
@@ -34,7 +34,7 @@ class TestFleetUtil(unittest.TestCase):
     train_dir = os.path.join("fleet_util_data", "train_program")

     def test_util_base(self):
-        import paddle.fleet as fleet
+        import paddle.distributed.fleet as fleet
         util = fleet.UtilBase()
         strategy = fleet.DistributedStrategy()
         util._set_strategy(strategy)
@@ -42,7 +42,7 @@ class TestFleetUtil(unittest.TestCase):
         util._set_role_maker(role_maker)

     def test_util_factory(self):
-        import paddle.fleet as fleet
+        import paddle.distributed.fleet as fleet
         factory = fleet.base.util_factory.UtilFactory()
         strategy = fleet.DistributedStrategy()
         role_maker = None  # should be fleet.PaddleCloudRoleMaker()
@@ -55,7 +55,7 @@ class TestFleetUtil(unittest.TestCase):
         self.assertEqual(util.role_maker, None)

     def test_get_util(self):
-        import paddle.fleet as fleet
+        import paddle.distributed.fleet as fleet
         import paddle.fluid.incubate.fleet.base.role_maker as role_maker
         role = role_maker.PaddleCloudRoleMaker(is_collective=True)
         fleet.init(role)
@@ -63,7 +63,7 @@ class TestFleetUtil(unittest.TestCase):
         self.assertEqual(default_util, None)

     def test_set_user_defined_util(self):
-        import paddle.fleet as fleet
+        import paddle.distributed.fleet as fleet

         class UserDefinedUtil(fleet.UtilBase):
             def __init__(self):
@@ -81,7 +81,7 @@ class TestFleetUtil(unittest.TestCase):
         self.assertEqual(user_id, 10)

     def test_fs(self):
-        from paddle.fleet.utils import LocalFS
+        from paddle.distributed.fleet.utils import LocalFS
         fs = LocalFS()
         dirs, files = fs.ls_dir("test_tmp")
         dirs, files = fs.ls_dir("./")
diff --git a/python/paddle/fluid/tests/unittests/test_fs_interface.py b/python/paddle/fluid/tests/unittests/test_fs_interface.py
index 6d78d3a4736..c01876531c9 100644
--- a/python/paddle/fluid/tests/unittests/test_fs_interface.py
+++ b/python/paddle/fluid/tests/unittests/test_fs_interface.py
@@ -20,7 +20,7 @@ import os
 import sys
 import inspect

-from paddle.fleet.utils import LocalFS, FS, HDFSClient, FSTimeOut, FSFileExistsError, FSFileNotExistsError
+from paddle.distributed.fleet.utils import LocalFS, FS, HDFSClient, FSTimeOut, FSFileExistsError, FSFileNotExistsError


 class FSTest(unittest.TestCase):
diff --git a/python/paddle/fluid/tests/unittests/test_hdfs.py b/python/paddle/fluid/tests/unittests/test_hdfs.py
index e0e6c8c14f6..75e2f5d6792 100644
--- a/python/paddle/fluid/tests/unittests/test_hdfs.py
+++ b/python/paddle/fluid/tests/unittests/test_hdfs.py
@@ -19,7 +19,7 @@ from paddle.fluid.incubate.fleet.collective import CollectiveOptimizer, fleet
 import os
 import sys
-from paddle.fleet.utils import LocalFS, HDFSClient, FSTimeOut, FSFileExistsError, FSFileNotExistsError
+from paddle.distributed.fleet.utils import LocalFS, HDFSClient, FSTimeOut, FSFileExistsError, FSFileNotExistsError

 java_home = os.environ["JAVA_HOME"]

diff --git a/python/paddle/fluid/tests/unittests/test_monitor.py b/python/paddle/fluid/tests/unittests/test_monitor.py
index 2d4c8f61c04..f6207edb41c 100644
--- a/python/paddle/fluid/tests/unittests/test_monitor.py
+++ b/python/paddle/fluid/tests/unittests/test_monitor.py
@@ -52,7 +52,7 @@ class TestDatasetWithStat(unittest.TestCase):
                 name=slot, shape=[1], dtype="int64", lod_level=1)
             slots_vars.append(var)

-        dataset = paddle.fleet.DatasetFactory().create_dataset(
+        dataset = paddle.distributed.fleet.DatasetFactory().create_dataset(
             "InMemoryDataset")
         dataset.set_batch_size(32)
         dataset.set_thread(3)
diff --git a/python/setup.py.in b/python/setup.py.in
index 43a57ae5463..29bc68444e1 100644
--- a/python/setup.py.in
+++ b/python/setup.py.in
@@ -145,14 +145,14 @@ packages=['paddle',
           'paddle.incubate',
           'paddle.incubate.complex',
           'paddle.incubate.complex.tensor',
-          'paddle.fleet',
-          'paddle.fleet.base',
-          'paddle.fleet.meta_optimizers',
-          'paddle.fleet.runtime',
-          'paddle.fleet.dataset',
-          'paddle.fleet.metrics',
-          'paddle.fleet.proto',
-          'paddle.fleet.utils',
+          'paddle.distributed.fleet',
+          'paddle.distributed.fleet.base',
+          'paddle.distributed.fleet.meta_optimizers',
+          'paddle.distributed.fleet.runtime',
+          'paddle.distributed.fleet.dataset',
+          'paddle.distributed.fleet.metrics',
+          'paddle.distributed.fleet.proto',
+          'paddle.distributed.fleet.utils',
           'paddle.framework',
           'paddle.jit',
           'paddle.fluid',
@@ -482,7 +482,7 @@ with redirect_stdout():
         },
         entry_points={
             'console_scripts': [
-                'fleetrun = paddle.fleet.launch:launch'
+                'fleetrun = paddle.distributed.fleet.launch:launch'
             ]
         }
     )
-- 
GitLab