Unverified commit 50a5bcfc authored by Dong Daxiang, committed by GitHub

【paddle.fleet】paddle.fleet -> paddle.distributed.fleet. (#26186)

* move paddle.fleet to paddle.distributed.fleet
Parent ffe52b44
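For context, a minimal sketch of what this rename means for user code (import and class names are taken from the hunks below; this assumes a Paddle build that already contains this commit):

    # Old import path (pre-#26186):
    #   import paddle.fleet as fleet
    # New import path after this commit:
    import paddle.distributed.fleet as fleet

    # DistributedStrategy and the role makers are reached through the
    # renamed package; the call sites themselves are unchanged.
    strategy = fleet.DistributedStrategy()
    strategy.amp = True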
@@ -164,23 +164,23 @@ if(WITH_PYTHON)
 if (NOT WIN32)
 add_custom_command(TARGET framework_py_proto POST_BUILD
 COMMAND ${CMAKE_COMMAND} -E make_directory ${PADDLE_BINARY_DIR}/python/paddle/fluid/proto
-COMMAND ${CMAKE_COMMAND} -E make_directory ${PADDLE_BINARY_DIR}/python/paddle/fleet/proto
-COMMAND ${CMAKE_COMMAND} -E touch ${PADDLE_BINARY_DIR}/python/paddle/fleet/proto/__init__.py
+COMMAND ${CMAKE_COMMAND} -E make_directory ${PADDLE_BINARY_DIR}/python/paddle/distributed/fleet/proto
+COMMAND ${CMAKE_COMMAND} -E touch ${PADDLE_BINARY_DIR}/python/paddle/distributed/fleet/proto/__init__.py
 COMMAND cp *.py ${PADDLE_BINARY_DIR}/python/paddle/fluid/proto/
-COMMAND cp distributed_strategy_*.py ${PADDLE_BINARY_DIR}/python/paddle/fleet/proto
+COMMAND cp distributed_strategy_*.py ${PADDLE_BINARY_DIR}/python/paddle/distributed/fleet/proto
 COMMENT "Copy generated python proto into directory paddle/fluid/proto."
 WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR})
 else(NOT WIN32)
 string(REPLACE "/" "\\" proto_dstpath "${PADDLE_BINARY_DIR}/python/paddle/fluid/proto/")
-string(REPLACE "/" "\\" fleet_proto_dstpath "${PADDLE_BINARY_DIR}/python/paddle/fleet/proto/")
+string(REPLACE "/" "\\" fleet_proto_dstpath "${PADDLE_BINARY_DIR}/python/paddle/distributed/fleet/proto/")
 add_custom_command(TARGET framework_py_proto POST_BUILD
 COMMAND ${CMAKE_COMMAND} -E make_directory ${PADDLE_BINARY_DIR}/python/paddle/fluid/proto
-COMMAND ${CMAKE_COMMAND} -E make_directory ${PADDLE_BINARY_DIR}/python/paddle/fleet/proto
-COMMAND ${CMAKE_COMMAND} -E touch ${PADDLE_BINARY_DIR}/python/paddle/fleet/proto/__init__.py
+COMMAND ${CMAKE_COMMAND} -E make_directory ${PADDLE_BINARY_DIR}/python/paddle/distributed/fleet/proto
+COMMAND ${CMAKE_COMMAND} -E touch ${PADDLE_BINARY_DIR}/python/paddle/distributed/fleet/proto/__init__.py
 COMMAND copy /Y *.py ${proto_dstpath}
 COMMAND copy /Y distributed_strategy_*.py ${fleet_proto_dstpath}
 COMMENT "Copy generated python proto into directory paddle/fluid/proto."
-COMMENT "Copy generated python proto into directory paddle/fleet/proto."
+COMMENT "Copy generated python proto into directory paddle/distributed/fleet/proto."
 WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR})
 endif(NOT WIN32)
 endif()
......
@@ -36,7 +36,7 @@ import paddle.distributed
 import paddle.sysconfig
 import paddle.tensor
 import paddle.nn
-import paddle.fleet
+import paddle.distributed.fleet
 import paddle.framework
 import paddle.optimizer
 import paddle.metric
......
@@ -13,7 +13,7 @@
 # limitations under the License.
 import paddle
-from paddle.fleet.proto import distributed_strategy_pb2
+from paddle.distributed.fleet.proto import distributed_strategy_pb2
 from paddle.fluid.framework import Variable
 import google.protobuf.text_format
@@ -103,7 +103,7 @@ class DistributedStrategy(object):
 Examples:
 .. code-block:: python
-import paddle.fleet as fleet
+import paddle.distributed.fleet as fleet
 strategy = fleet.DistributedStrategy()
 strategy.dgc = True
 strategy.recompute = True
@@ -120,7 +120,7 @@ class DistributedStrategy(object):
 Examples:
 .. code-block:: python
-import paddle.fleet as fleet
+import paddle.distributed.fleet as fleet
 strategy = fleet.DistributedStrategy()
 strategy.load_from_prototxt("dist_strategy.protoxt")
 """
@@ -141,7 +141,7 @@ class DistributedStrategy(object):
 exe_strategy.num_iteration_per_drop_scope = 10
 exe_strategy.num_iteration_per_run = 10
-strategy = paddle.fleet.DistributedStrategy()
+strategy = paddle.distributed.fleet.DistributedStrategy()
 strategy.execution_strategy = exe_strategy
 """
 execution_strategy = paddle.fluid.ExecutionStrategy()
@@ -178,7 +178,7 @@ class DistributedStrategy(object):
 build_strategy.fuse_all_optimizer_ops = True
 build_strategy.enable_inplace = True
-strategy = paddle.fleet.DistributedStrategy()
+strategy = paddle.distributed.fleet.DistributedStrategy()
 strategy.build_strategy = build_strategy
 """
@@ -211,7 +211,7 @@ class DistributedStrategy(object):
 Examples:
 .. code-block:: python
-import paddle.fleet as fleet
+import paddle.distributed.fleet as fleet
 role_maker = fleet.PaddleCloudRoleMaker()
 fleet.init(role_maker)
@@ -253,7 +253,7 @@ class DistributedStrategy(object):
 Examples:
 .. code-block:: python
-import paddle.fleet as fleet
+import paddle.distributed.fleet as fleet
 role_maker = fleet.PaddleCloudRoleMaker()
 fleet.init(role_maker)
@@ -282,7 +282,7 @@ class DistributedStrategy(object):
 Examples:
 .. code-block:: python
-import paddle.fleet as fleet
+import paddle.distributed.fleet as fleet
 strategy = fleet.DistributedStrategy()
 strategy.amp = True # by default this is false
@@ -314,7 +314,7 @@ class DistributedStrategy(object):
 Examples:
 .. code-block:: python
-import paddle.fleet as fleet
+import paddle.distributed.fleet as fleet
 strategy = fleet.DistributedStrategy()
 strategy.recompute = True
 # suppose x and y are names of checkpoint tensors for recomputation
@@ -432,7 +432,7 @@ class DistributedStrategy(object):
 Examples:
 .. code-block:: python
-import paddle.fleet as fleet
+import paddle.distributed.fleet as fleet
 strategy = fleet.DistributedStrategy()
 strategy.recompute = True
 strategy.recompute_configs = {"checkpionts": ["x", "y"]}
@@ -457,7 +457,7 @@ class DistributedStrategy(object):
 Examples:
 .. code-block:: python
-import paddle.fleet as fleet
+import paddle.distributed.fleet as fleet
 strategy = fleet.DistributedStrategy()
 strategy.pipeline = True
@@ -490,7 +490,7 @@ class DistributedStrategy(object):
 Examples:
 .. code-block:: python
-import paddle.fleet as fleet
+import paddle.distributed.fleet as fleet
 strategy = fleet.DistributedStrategy()
 strategy.pipeline = True
 strategy.pipeline_configs = {"micro_batch": 12}
@@ -560,7 +560,7 @@ class DistributedStrategy(object):
 Examples:
 .. code-block:: python
-import paddle.fleet as fleet
+import paddle.distributed.fleet as fleet
 strategy = fleet.DistributedStrategy()
 strategy.gradient_merge = True
 strategy.gradient_merge_configs = {"k_steps": 4, "avg": True}
@@ -583,7 +583,7 @@ class DistributedStrategy(object):
 avg (bool): whether to average the gradients of each mini-batch,
 the default value is `True`
 Example:
-import paddle.fleet as fleet
+import paddle.distributed.fleet as fleet
 strategy = fleet.DistributedStrategy()
 strategy.gradient_merge = True
 strategy.gradient_merge_configs = {"k_steps": 4, "avg": True}
......
@@ -34,9 +34,8 @@ class Fleet(object):
 Examples:
 .. code-block:: python
-import paddle.fleet as fleet
-import paddle.fluid.incubate.fleet.base.role_maker as role_maker
-role = role_maker.PaddleCloudRoleMaker(is_collective=True)
+import paddle.distributed.fleet as fleet
+role = fleet.role_maker.PaddleCloudRoleMaker(is_collective=True)
 fleet.init(role)
 strategy = fleet.DistributedStrategy()
 optimizer = paddle.optimizer.SGD(learning_rate=0.001)
@@ -218,9 +217,8 @@ class Fleet(object):
 Examples:
 .. code-block:: python
-import paddle.fleet as fleet
-import paddle.fluid.incubate.fleet.base.role_maker as role_maker
-role = role_maker.PaddleCloudRoleMaker(is_collective=True)
+import paddle.distributed.fleet as fleet
+role = fleet.role_maker.PaddleCloudRoleMaker(is_collective=True)
 fleet.init(role)
 strategy = fleet.DistributedStrategy()
 optimizer = paddle.optimizer.SGD(learning_rate=0.001)
@@ -260,8 +258,7 @@ class Fleet(object):
 Examples:
 import paddle
-import paddle.fleet as fleet
-import paddle.fluid.incubate.fleet.base.role_maker as role_maker
+import paddle.distributed.fleet as fleet
 fc_1 = paddle.layers.fc(input=input_x, size=hid_dim, act='tanh')
 fc_2 = paddlen.layers.fc(input=fc_1, size=hid_dim, act='tanh')
@@ -269,7 +266,7 @@ class Fleet(object):
 cost = paddle.layers.cross_entropy(input=prediction, label=input_y)
 avg_cost = paddle.layers.mean(x=cost)
-role = role_maker.PaddleCloudRoleMaker(is_collective=True)
+role = fleet.role_maker.PaddleCloudRoleMaker(is_collective=True)
 fleet.init(role)
 strategy = fleet.DistributedStrategy()
 optimizer = paddle.optimizer.SGD(learning_rate=0.001)
......
@@ -481,7 +481,7 @@ class PaddleCloudRoleMaker(RoleMakerBase):
 return "lo"
 def __start_kv_server(self, http_server_d, size_d):
-from paddle.fleet.utils import KVServer
+from paddle.distributed.fleet.utils import KVServer
 http_server = KVServer(int(self._http_ip_port[1]), size_d)
 http_server.start()
 wait_seconds = 5
......
@@ -55,8 +55,8 @@ class UtilBase(object):
 def set_file_system(self, fs_client):
 assert isinstance(
-fs_client,
-FS), "fs_client must be the instance of paddle.fleet.utils.FS"
+fs_client, FS
+), "fs_client must be the instance of paddle.distributed.fleet.utils.FS"
 self.fs_client = fs_client
 def __check_comm_world(self, comm_world="worker"):
......
@@ -14,7 +14,7 @@
 import os
 import paddle
-from paddle.fleet.launch_utils import get_cluster, logger
+from paddle.distributed.fleet.launch_utils import get_cluster, logger
 def get_cloud_cluster(args_node_ips, selected_gpus, args_port=6170):
......
@@ -66,8 +66,8 @@ from argparse import ArgumentParser, REMAINDER
 import paddle
 import paddle.fluid as fluid
-from paddle.fleet.launch_utils import *
-import paddle.fleet.cloud_utils as cloud_utils
+from paddle.distributed.fleet.launch_utils import *
+import paddle.distributed.fleet.cloud_utils as cloud_utils
 def _print_arguments(args):
......
@@ -43,7 +43,7 @@ def sum(input, scope=None):
 # in train.py, after train or infer
 res = np.array(scope.find_var(global_cnt.name).get_tensor())
-print("sum array: ", paddle.fleet.sum(res))
+print("sum array: ", paddle.distributed.fleet.sum(res))
 """
 fleet._role_maker._barrier_worker()
 if scope is None:
@@ -82,7 +82,7 @@ def max(input, scope=None):
 # in train.py, after train or infer
 res = np.array(scope.find_var(global_cnt.name).get_tensor())
-print("max array: ", paddle.fleet.max(res))
+print("max array: ", paddle.distributed.fleet.max(res))
 """
 fleet._role_maker._barrier_worker()
 if scope is None:
@@ -121,7 +121,7 @@ def min(input, scope=None):
 # in train.py, after train or infer
 res = np.array(scope.find_var(global_cnt.name).get_tensor())
-print("min array: ", paddle.fleet.min(res))
+print("min array: ", paddle.distributed.fleet.min(res))
 """
 fleet._role_maker._barrier_worker()
 if scope is None:
@@ -162,7 +162,7 @@ def auc(stat_pos, stat_neg, scope=None):
 # in train.py, after train or infer
 pos = np.array(scope.find_var(stat_pos.name).get_tensor())
 neg = np.array(scope.find_var(stat_neg.name).get_tensor())
-print("auc: ", paddle.fleet.auc(pos, neg))
+print("auc: ", paddle.distributed.fleet.auc(pos, neg))
 """
 fleet._role_maker._barrier_worker()
 if scope is None:
@@ -240,7 +240,7 @@ def mae(abserr, total_ins_num, scope=None):
 # in train.py, after train or infer
 res = np.array(scope.find_var(abserr.name).get_tensor())
-print("mae: ", paddle.fleet.mae(res, total_ins_num))
+print("mae: ", paddle.distributed.fleet.mae(res, total_ins_num))
 """
 fleet._role_maker._barrier_worker()
 if scope is None:
@@ -278,7 +278,7 @@ def rmse(sqrerr, total_ins_num, scope=None):
 # in train.py, after train or infer
 res = np.array(scope.find_var(sqrerr.name).get_tensor())
-print("rmse: ", paddle.fleet.rmse(res, total_ins_num))
+print("rmse: ", paddle.distributed.fleet.rmse(res, total_ins_num))
 """
 fleet._role_maker._barrier_worker()
 if scope is None:
@@ -316,7 +316,7 @@ def mse(sqrerr, total_ins_num, scope=None):
 # in train.py, after train or infer
 metric = np.array(scope.find_var(sqrerr.name).get_tensor())
-print("mse: ", paddle.fleet.mse(metric, total_ins_num))
+print("mse: ", paddle.distributed.fleet.mse(metric, total_ins_num))
 """
 fleet._role_maker._barrier_worker()
 if scope is None:
@@ -365,7 +365,7 @@ def acc(correct, total, scope=None):
 # in train.py, after train or infer
 correct_num = np.array(scope.find_var(correct.name).get_tensor())
 total_num = np.array(scope.find_var(total.name).get_tensor())
-print("accuracy: ", paddle.fleet.acc(correct_num, total_num))
+print("accuracy: ", paddle.distributed.fleet.acc(correct_num, total_num))
 """
 fleet._role_maker._barrier_worker()
 if scope is None:
......
@@ -305,7 +305,7 @@ class TrainEpochRange(SerializableBase):
 if self._checker.ce_test:
 config = None
-from paddle.fleet.utils.fs import HDFSClient
+from paddle.distributed.fleet.utils.fs import HDFSClient
 self._hdfs = HDFSClient(self._checker.hdfs_home, config)
 self._cper = CheckpointSaver(self._hdfs)
......
@@ -79,7 +79,7 @@ class CheckpointSaver(object):
 tmp_path = "{}.tmp".format(real_path)
 saved_path = tmp_path
-from paddle.fleet.utils.fs import LocalFS
+from paddle.distributed.fleet.utils.fs import LocalFS
 local_fs = LocalFS()
 cache_path = None
@@ -134,7 +134,7 @@ class CheckpointSaver(object):
 assert isinstance(checkpoint_no, int)
 assert checkpoint_no >= 0
-from paddle.fleet.utils.fs import LocalFS
+from paddle.distributed.fleet.utils.fs import LocalFS
 local_fs = LocalFS()
 if self._fs.need_upload_download():
 cache_path = "{}/{}.{}.load_cache".format(
......
@@ -21,7 +21,7 @@ from paddle.fluid.executor import Executor
 from paddle.fluid.optimizer import SGD
 from paddle.fluid.incubate.fleet.base.mode import Mode
-from paddle.fleet.base.role_maker import RoleMakerBase
+from paddle.distributed.fleet.base.role_maker import RoleMakerBase
 from paddle.fluid.contrib.mixed_precision.decorator import OptimizerWithMixedPrecision
 from . import mode
......
@@ -1709,7 +1709,7 @@ class PyReader(DataLoaderBase):
 class DatasetLoader(DataLoaderBase):
 def __init__(self, dataset, places, drop_last):
-assert isinstance(dataset, paddle.fleet.dataset.
+assert isinstance(dataset, paddle.distributed.fleet.dataset.
 DatasetBase), "dataset must be type of DatasetBase"
 assert not in_dygraph_mode(
 ), "DatasetLoader is not supported in dygraph mode yet"
@@ -1725,7 +1725,7 @@ class DatasetLoader(DataLoaderBase):
 dataset.set_thread(thread_num)
-if isinstance(dataset, paddle.fleet.dataset.
+if isinstance(dataset, paddle.distributed.fleet.dataset.
 InMemoryDataset) and dataset.queue_num > thread_num:
 logging.warn("queue_num {} which is set in Dataset is ignored".
 format(dataset.queue_num))
......
File mode changed from 100755 to 100644
@@ -18,7 +18,7 @@ import unittest
 import os
 import paddle.fluid.core as core
 import paddle.fluid as fluid
-from paddle.fleet.base.private_helper_function import wait_server_ready
+from paddle.distributed.fleet.base.private_helper_function import wait_server_ready
 class TestCCommInitOp(unittest.TestCase):
......
@@ -28,7 +28,7 @@ import numpy as np
 import ctr_dataset_reader
 from test_dist_fleet_base import runtime_main, FleetDistRunnerBase
-from paddle.fleet.base.util_factory import fleet_util
+from paddle.distributed.fleet.base.util_factory import fleet_util
 # Fix seed for test
 fluid.default_startup_program().random_seed = 1
@@ -217,7 +217,7 @@ class TestDistCTR2x2(FleetDistRunnerBase):
 filelist.append(train_file_path)
 # config dataset
-dataset = paddle.fleet.DatasetFactory().create_dataset()
+dataset = paddle.distributed.fleet.DatasetFactory().create_dataset()
 dataset.set_batch_size(batch_size)
 dataset.set_use_var(self.feeds)
 pipe_command = 'python ctr_dataset_reader.py'
......
@@ -20,7 +20,7 @@ from paddle.fluid.incubate.fleet.collective import CollectiveOptimizer, fleet
 import os
 import sys
-from paddle.fleet.utils.fs import LocalFS, HDFSClient
+from paddle.distributed.fleet.utils.fs import LocalFS, HDFSClient
 import paddle.fluid.incubate.checkpoint.auto_checkpoint as acp
 from paddle.fluid.incubate.checkpoint.checkpoint_saver import PaddleModel
 from paddle.fluid.framework import program_guard
......
@@ -20,7 +20,7 @@ from paddle.fluid.incubate.fleet.collective import CollectiveOptimizer, fleet
 import os
 import sys
-from paddle.fleet.utils.fs import LocalFS, HDFSClient
+from paddle.distributed.fleet.utils.fs import LocalFS, HDFSClient
 import paddle.fluid.incubate.checkpoint.auto_checkpoint as acp
 from paddle.fluid.incubate.checkpoint.checkpoint_saver import PaddleModel
 from paddle.fluid.framework import program_guard
......
@@ -26,7 +26,7 @@ import paddle
 import paddle.fluid as fluid
 import paddle.fluid.incubate.fleet.base.role_maker as role_maker
-import paddle.fleet as fleet
+import paddle.distributed.fleet as fleet
 class TestCommunicatorGeoEnd2End(unittest.TestCase):
@@ -108,7 +108,7 @@ class TestCommunicatorGeoEnd2End(unittest.TestCase):
 role = role_maker.PaddleCloudRoleMaker()
-strategy = paddle.fleet.DistributedStrategy()
+strategy = paddle.distributed.fleet.DistributedStrategy()
 strategy.a_sync = True
 strategy.a_sync_configs = {"k_steps": 100}
@@ -136,7 +136,7 @@ import paddle.fluid as fluid
 from paddle.fluid.communicator import Communicator
 import paddle.fluid.incubate.fleet.base.role_maker as role_maker
 from paddle.fluid.incubate.fleet.parameter_server.mode import DistributedMode
-import paddle.fleet as fleet
+import paddle.distributed.fleet as fleet
 from test_communicator_geo import TestCommunicatorGeoEnd2End
......
@@ -22,7 +22,7 @@ import paddle
 import paddle.fluid as fluid
 import paddle.fluid.incubate.fleet.base.role_maker as role_maker
-import paddle.fleet as fleet
+import paddle.distributed.fleet as fleet
 class TestCommunicator(unittest.TestCase):
@@ -51,7 +51,7 @@ class TestCommunicator(unittest.TestCase):
 optimizer = fluid.optimizer.SGD(0.01)
-strategy = paddle.fleet.DistributedStrategy()
+strategy = paddle.distributed.fleet.DistributedStrategy()
 strategy.a_sync = False
 optimizer = fleet.distributed_optimizer(optimizer, strategy)
......
@@ -38,25 +38,25 @@ class TestDataset(unittest.TestCase):
 def test_dataset_create(self):
 """ Testcase for dataset create. """
 try:
-dataset = paddle.fleet.DatasetFactory().create_dataset(
+dataset = paddle.distributed.fleet.DatasetFactory().create_dataset(
 "InMemoryDataset")
 except:
 self.assertTrue(False)
 try:
-dataset = paddle.fleet.DatasetFactory().create_dataset(
+dataset = paddle.distributed.fleet.DatasetFactory().create_dataset(
 "QueueDataset")
 except:
 self.assertTrue(False)
 try:
-dataset = paddle.fleet.DatasetFactory().create_dataset(
+dataset = paddle.distributed.fleet.DatasetFactory().create_dataset(
 "FileInstantDataset")
 except:
 self.assertTrue(False)
 try:
-dataset = paddle.fleet.DatasetFactory().create_dataset(
+dataset = paddle.distributed.fleet.DatasetFactory().create_dataset(
 "MyOwnDataset")
 self.assertTrue(False)
 except:
@@ -95,7 +95,7 @@ class TestDataset(unittest.TestCase):
 name=slot, shape=[1], dtype="int64", lod_level=1)
 slots_vars.append(var)
-dataset = paddle.fleet.DatasetFactory().create_dataset(
+dataset = paddle.distributed.fleet.DatasetFactory().create_dataset(
 "InMemoryDataset")
 dataset.set_batch_size(32)
 dataset.set_thread(3)
@@ -176,7 +176,7 @@ class TestDataset(unittest.TestCase):
 name=slot, shape=[1], dtype="int64", lod_level=1)
 slots_vars.append(var)
-dataset = paddle.fleet.DatasetFactory().create_dataset(
+dataset = paddle.distributed.fleet.DatasetFactory().create_dataset(
 "InMemoryDataset")
 dataset.set_batch_size(32)
 dataset.set_thread(3)
@@ -228,7 +228,7 @@ class TestDataset(unittest.TestCase):
 name=slot, shape=[1], dtype="int64", lod_level=1)
 slots_vars.append(var)
-dataset = paddle.fleet.DatasetFactory().create_dataset(
+dataset = paddle.distributed.fleet.DatasetFactory().create_dataset(
 "InMemoryDataset")
 dataset.set_batch_size(32)
 dataset.set_thread(3)
@@ -300,7 +300,7 @@ class TestDataset(unittest.TestCase):
 name=slot, shape=[1], dtype="float32", lod_level=1)
 slots_vars.append(var)
-dataset = paddle.fleet.DatasetFactory().create_dataset(
+dataset = paddle.distributed.fleet.DatasetFactory().create_dataset(
 "InMemoryDataset")
 dataset.set_batch_size(32)
 dataset.set_thread(1)
@@ -367,7 +367,7 @@ class TestDataset(unittest.TestCase):
 name="slot4", shape=[1], dtype="float32", lod_level=0)
 slots_vars = [var1, var2, var3, var4]
-dataset = paddle.fleet.DatasetFactory().create_dataset(
+dataset = paddle.distributed.fleet.DatasetFactory().create_dataset(
 "InMemoryDataset")
 dataset.set_batch_size(32)
 dataset.set_thread(1)
@@ -423,7 +423,7 @@ class TestDataset(unittest.TestCase):
 name=slot, shape=[1], dtype="float32", lod_level=1)
 slots_vars.append(var)
-dataset = paddle.fleet.DatasetFactory().create_dataset(
+dataset = paddle.distributed.fleet.DatasetFactory().create_dataset(
 "InMemoryDataset")
 dataset.set_batch_size(32)
 dataset.set_thread(3)
@@ -517,7 +517,8 @@ class TestDataset(unittest.TestCase):
 name=slot, shape=[1], dtype="int64", lod_level=1)
 slots_vars.append(var)
-dataset = paddle.fleet.DatasetFactory().create_dataset("QueueDataset")
+dataset = paddle.distributed.fleet.DatasetFactory().create_dataset(
+"QueueDataset")
 dataset.set_batch_size(32)
 dataset.set_thread(3)
 dataset.set_filelist(
@@ -542,7 +543,8 @@ class TestDataset(unittest.TestCase):
 except Exception as e:
 self.assertTrue(False)
-dataset2 = paddle.fleet.DatasetFactory().create_dataset("QueueDataset")
+dataset2 = paddle.distributed.fleet.DatasetFactory().create_dataset(
+"QueueDataset")
 dataset2.set_use_var(slots_vars)
 dataset2.set_batch_size(32)
 dataset2.set_thread(3)
@@ -583,7 +585,8 @@ class TestDataset(unittest.TestCase):
 name=slot, shape=[1], dtype="float32", lod_level=1)
 slots_vars.append(var)
-dataset = paddle.fleet.DatasetFactory().create_dataset("QueueDataset")
+dataset = paddle.distributed.fleet.DatasetFactory().create_dataset(
+"QueueDataset")
 dataset.set_batch_size(32)
 dataset.set_thread(3)
 dataset.set_filelist(
@@ -638,7 +641,7 @@ class TestDataset(unittest.TestCase):
 name=slot, shape=[None, 1], dtype="int64", lod_level=1)
 slots_vars.append(var)
-dataset = paddle.fleet.DatasetFactory().create_dataset(
+dataset = paddle.distributed.fleet.DatasetFactory().create_dataset(
 "InMemoryDataset")
 dataset.set_input_type(1)
 dataset.set_batch_size(1)
@@ -718,7 +721,8 @@ class TestDatasetWithFetchHandler(unittest.TestCase):
 inputs(list): inputs of get_dataset
 files(list): files of get_dataset
 """
-dataset = paddle.fleet.DatasetFactory().create_dataset("QueueDataset")
+dataset = paddle.distributed.fleet.DatasetFactory().create_dataset(
+"QueueDataset")
 dataset.set_batch_size(32)
 dataset.set_thread(3)
 dataset.set_filelist(files)
@@ -875,7 +879,7 @@ class TestDataset2(unittest.TestCase):
 except ImportError as e:
 print("warning: no mpi4py")
 exe.run(startup_program)
-dataset = paddle.fleet.DatasetFactory().create_dataset(
+dataset = paddle.distributed.fleet.DatasetFactory().create_dataset(
 "InMemoryDataset")
 dataset.set_batch_size(32)
 dataset.set_thread(3)
@@ -945,7 +949,7 @@ class TestDataset2(unittest.TestCase):
 except ImportError as e:
 print("warning: no mpi4py")
 exe.run(startup_program)
-dataset = paddle.fleet.DatasetFactory().create_dataset(
+dataset = paddle.distributed.fleet.DatasetFactory().create_dataset(
 "InMemoryDataset")
 dataset.set_batch_size(32)
 dataset.set_thread(3)
@@ -962,12 +966,12 @@ class TestDataset2(unittest.TestCase):
 print("warning: catch expected error")
 fleet._opt_info = None
 fleet._fleet_ptr = None
-dataset = paddle.fleet.DatasetFactory().create_dataset(
+dataset = paddle.distributed.fleet.DatasetFactory().create_dataset(
 "InMemoryDataset")
 dataset.set_rank_offset("")
 dataset.set_pv_batch_size(1)
 dataset.set_hdfs_config("", "")
-d = paddle.fleet.DatasetBase()
+d = paddle.distributed.fleet.DatasetBase()
 try:
 dataset.set_feed_type("MultiSlotInMemoryDataFeed")
 except:
@@ -1000,7 +1004,7 @@ class TestDataset2(unittest.TestCase):
 dataset.get_pv_data_size()
 dataset.get_memory_data_size()
 dataset.get_shuffle_data_size()
-dataset = paddle.fleet.DatasetFactory().create_dataset(
+dataset = paddle.distributed.fleet.DatasetFactory().create_dataset(
 "QueueDataset")
 try:
 dataset.local_shuffle()
@@ -1010,7 +1014,7 @@ class TestDataset2(unittest.TestCase):
 dataset.global_shuffle()
 except:
 print("warning: catch expected error")
-dataset = paddle.fleet.FileInstantDataset()
+dataset = paddle.distributed.fleet.FileInstantDataset()
 try:
 dataset.local_shuffle()
 except:
......
@@ -97,7 +97,7 @@ class DatasetLoaderTestBase(unittest.TestCase):
 def check_batch_number(self, place, randomize_batch_num=False):
 main_prog, startup_prog, feeds = self.build_network()
-dataset = paddle.fleet.DatasetFactory().create_dataset(
+dataset = paddle.distributed.fleet.DatasetFactory().create_dataset(
 self.dataset_name)
 dataset.set_batch_size(BATCH_SIZE)
......
@@ -33,7 +33,7 @@ class TestFleetGradientMergeMetaOptimizer(unittest.TestCase):
 def test_a_sync_optimizer_trainer(self):
 os.environ["TRAINING_ROLE"] = "TRAINER"
-import paddle.fleet as fleet
+import paddle.distributed.fleet as fleet
 main_program = paddle.fluid.Program()
 startup_program = paddle.fluid.Program()
@@ -53,7 +53,7 @@ class TestFleetGradientMergeMetaOptimizer(unittest.TestCase):
 input=prediction, label=input_y)
 avg_cost = paddle.fluid.layers.mean(x=cost)
-strategy = paddle.fleet.DistributedStrategy()
+strategy = paddle.distributed.fleet.DistributedStrategy()
 strategy.a_sync = True
 optimizer = paddle.optimizer.SGD(learning_rate=0.01)
 optimizer = fleet.distributed_optimizer(optimizer, strategy=strategy)
@@ -78,7 +78,7 @@ class TestFleetGradientMergeMetaOptimizer(unittest.TestCase):
 def test_a_sync_optimizer_pserver(self):
 os.environ["TRAINING_ROLE"] = "PSERVER"
-import paddle.fleet as fleet
+import paddle.distributed.fleet as fleet
 main_program = paddle.fluid.Program()
 startup_program = paddle.fluid.Program()
@@ -98,7 +98,7 @@ class TestFleetGradientMergeMetaOptimizer(unittest.TestCase):
 input=prediction, label=input_y)
 avg_cost = paddle.fluid.layers.mean(x=cost)
-strategy = paddle.fleet.DistributedStrategy()
+strategy = paddle.distributed.fleet.DistributedStrategy()
 strategy.a_sync = True
 optimizer = paddle.optimizer.SGD(learning_rate=0.01)
 optimizer = fleet.distributed_optimizer(optimizer, strategy=strategy)
......
@@ -32,7 +32,7 @@ class TestFleetGradientMergeMetaOptimizer(unittest.TestCase):
 def test_a_sync_optimizer_trainer(self):
 os.environ["TRAINING_ROLE"] = "TRAINER"
-import paddle.fleet as fleet
+import paddle.distributed.fleet as fleet
 main_program = paddle.fluid.Program()
 startup_program = paddle.fluid.Program()
@@ -52,7 +52,7 @@ class TestFleetGradientMergeMetaOptimizer(unittest.TestCase):
 input=prediction, label=input_y)
 avg_cost = paddle.fluid.layers.mean(x=cost)
-strategy = paddle.fleet.DistributedStrategy()
+strategy = paddle.distributed.fleet.DistributedStrategy()
 strategy.a_sync = True
 strategy.a_sync_configs = {"k_steps": 100}
 optimizer = paddle.optimizer.SGD(learning_rate=0.01)
@@ -75,7 +75,7 @@ class TestFleetGradientMergeMetaOptimizer(unittest.TestCase):
 def test_a_sync_optimizer_pserver(self):
 os.environ["TRAINING_ROLE"] = "PSERVER"
-import paddle.fleet as fleet
+import paddle.distributed.fleet as fleet
 main_program = paddle.fluid.Program()
 startup_program = paddle.fluid.Program()
@@ -95,7 +95,7 @@ class TestFleetGradientMergeMetaOptimizer(unittest.TestCase):
 input=prediction, label=input_y)
 avg_cost = paddle.fluid.layers.mean(x=cost)
-strategy = paddle.fleet.DistributedStrategy()
+strategy = paddle.distributed.fleet.DistributedStrategy()
 strategy.a_sync = True
 strategy.a_sync_configs = {"k_steps": 100}
 optimizer = paddle.optimizer.SGD(learning_rate=0.01)
......
@@ -15,7 +15,7 @@
 import unittest
 import paddle
 import os
-import paddle.fleet as fleet
+import paddle.distributed.fleet as fleet
 import paddle.fluid.incubate.fleet.base.role_maker as role_maker
 import time
@@ -45,7 +45,7 @@ class TestFleetGradientMergeMetaOptimizer(unittest.TestCase):
 input=prediction, label=input_y)
 avg_cost = paddle.fluid.layers.mean(x=cost)
-strategy = paddle.fleet.DistributedStrategy()
+strategy = paddle.distributed.fleet.DistributedStrategy()
 strategy.a_sync = False
 optimizer = paddle.optimizer.SGD(learning_rate=0.01)
 optimizer = fleet.distributed_optimizer(optimizer, strategy=strategy)
......
@@ -32,8 +32,8 @@ import tempfile
 import unittest
 import paddle.fluid as fluid
-import paddle.fleet.base.role_maker as role_maker
-from paddle.fleet.base.util_factory import fleet_util
+import paddle.distributed.fleet.base.role_maker as role_maker
+from paddle.distributed.fleet.base.util_factory import fleet_util
 from paddle.fluid.incubate.fleet.parameter_server.distribute_transpiler import fleet
 from paddle.fluid.incubate.fleet.parameter_server.distribute_transpiler.distributed_strategy import StrategyFactory
......
@@ -22,7 +22,7 @@ import shutil
 import paddle
 import paddle.fluid as fluid
 import paddle.fluid.incubate.fleet.base.role_maker as role_maker
-import paddle.fleet as fleet
+import paddle.distributed.fleet as fleet
 # For Net
 base_lr = 0.2
@@ -163,7 +163,7 @@ class TestPSPassWithBow(unittest.TestCase):
 fleet.init(role)
 loss, acc, _ = self.net()
-strategy = paddle.fleet.DistributedStrategy()
+strategy = paddle.distributed.fleet.DistributedStrategy()
 strategy.a_sync = True
 optimizer = paddle.optimizer.SGD(learning_rate=0.01)
 optimizer = fleet.distributed_optimizer(optimizer, strategy=strategy)
......
@@ -23,7 +23,7 @@ class TestFleetAMPOptimizer(unittest.TestCase):
 os.environ["PADDLE_TRAINER_ENDPOINTS"] = "127.0.0.1:36001"
 def test_amp_optimizer(self):
-import paddle.fleet as fleet
+import paddle.distributed.fleet as fleet
 import paddle.fluid.incubate.fleet.base.role_maker as role_maker
 role = role_maker.PaddleCloudRoleMaker(is_collective=True)
 fleet.init(role)
@@ -38,7 +38,7 @@ class TestFleetAMPOptimizer(unittest.TestCase):
 input=prediction, label=input_y)
 avg_cost = paddle.fluid.layers.mean(x=cost)
-strategy = paddle.fleet.DistributedStrategy()
+strategy = paddle.distributed.fleet.DistributedStrategy()
 strategy.amp = True
 strategy.amp_configs = {
 "init_loss_scaling": 32768,
......
@@ -26,13 +26,13 @@ class TestFleetBase(unittest.TestCase):
 "127.0.0.1:36001,127.0.0.2:36001"
 def test_init(self):
-import paddle.fleet as fleet
+import paddle.distributed.fleet as fleet
 import paddle.fluid.incubate.fleet.base.role_maker as role_maker
 role = role_maker.PaddleCloudRoleMaker(is_collective=True)
 fleet.init(role)
 def test_is_first_worker(self):
-import paddle.fleet as fleet
+import paddle.distributed.fleet as fleet
 import paddle.fluid.incubate.fleet.base.role_maker as role_maker
 role = role_maker.PaddleCloudRoleMaker(is_collective=True)
 fleet.init(role)
@@ -40,21 +40,21 @@ class TestFleetBase(unittest.TestCase):
 print("test fleet first worker done.")
 def test_worker_index(self):
-import paddle.fleet as fleet
+import paddle.distributed.fleet as fleet
 import paddle.fluid.incubate.fleet.base.role_maker as role_maker
 role = role_maker.PaddleCloudRoleMaker(is_collective=True)
 fleet.init(role)
 print(fleet.worker_index())
 def test_worker_num(self):
-import paddle.fleet as fleet
+import paddle.distributed.fleet as fleet
 import paddle.fluid.incubate.fleet.base.role_maker as role_maker
 role = role_maker.PaddleCloudRoleMaker(is_collective=True)
 fleet.init(role)
 print(fleet.worker_num())
 def test_is_worker(self):
-import paddle.fleet as fleet
+import paddle.distributed.fleet as fleet
 import paddle.fluid.incubate.fleet.base.role_maker as role_maker
 role = role_maker.PaddleCloudRoleMaker(is_collective=True)
 fleet.init(role)
@@ -62,14 +62,14 @@ class TestFleetBase(unittest.TestCase):
 print("test fleet is worker")
 def test_worker_endpoints(self):
-import paddle.fleet as fleet
+import paddle.distributed.fleet as fleet
 import paddle.fluid.incubate.fleet.base.role_maker as role_maker
 role = role_maker.PaddleCloudRoleMaker(is_collective=True)
 fleet.init(role)
 print(fleet.worker_endpoints(to_string=True))
 def test_server_num(self):
-import paddle.fleet as fleet
+import paddle.distributed.fleet as fleet
 import paddle.fluid.incubate.fleet.base.role_maker as role_maker
 role = role_maker.PaddleCloudRoleMaker(is_collective=True)
 fleet.init(role)
@@ -77,7 +77,7 @@ class TestFleetBase(unittest.TestCase):
 print("fleet server num: {}".format(fleet.server_num()))
 def test_server_index(self):
-import paddle.fleet as fleet
+import paddle.distributed.fleet as fleet
 import paddle.fluid.incubate.fleet.base.role_maker as role_maker
 role = role_maker.PaddleCloudRoleMaker(is_collective=True)
 fleet.init(role)
@@ -85,7 +85,7 @@ class TestFleetBase(unittest.TestCase):
 print("fleet server index: {}".format(fleet.server_index()))
 def test_server_endpoints(self):
-import paddle.fleet as fleet
+import paddle.distributed.fleet as fleet
 import paddle.fluid.incubate.fleet.base.role_maker as role_maker
 role = role_maker.PaddleCloudRoleMaker(is_collective=True)
 fleet.init(role)
@@ -94,7 +94,7 @@ class TestFleetBase(unittest.TestCase):
 fleet.server_endpoints(to_string=True)))
 def test_is_server(self):
-import paddle.fleet as fleet
+import paddle.distributed.fleet as fleet
 import paddle.fluid.incubate.fleet.base.role_maker as role_maker
 role = role_maker.PaddleCloudRoleMaker(is_collective=True)
 fleet.init(role)
@@ -102,14 +102,14 @@ class TestFleetBase(unittest.TestCase):
 print("test fleet is server")
 def test_util(self):
-import paddle.fleet as fleet
+import paddle.distributed.fleet as fleet
 import paddle.fluid.incubate.fleet.base.role_maker as role_maker
 role = role_maker.PaddleCloudRoleMaker(is_collective=True)
 fleet.init(role)
 self.assertEqual(fleet.util, None)
 def test_barrier_worker(self):
-import paddle.fleet as fleet
+import paddle.distributed.fleet as fleet
 import paddle.fluid.incubate.fleet.base.role_maker as role_maker
 role = role_maker.PaddleCloudRoleMaker(is_collective=True)
 fleet.init(role)
@@ -117,7 +117,7 @@ class TestFleetBase(unittest.TestCase):
 fleet.barrier_worker()
 def test_init_worker(self):
-import paddle.fleet as fleet
+import paddle.distributed.fleet as fleet
 import paddle.fluid.incubate.fleet.base.role_maker as role_maker
 role = role_maker.PaddleCloudRoleMaker(is_collective=True)
 fleet.init(role)
@@ -125,7 +125,7 @@ class TestFleetBase(unittest.TestCase):
 fleet.init_worker()
 def test_run_server(self):
-import paddle.fleet as fleet
+import paddle.distributed.fleet as fleet
 import paddle.fluid.incubate.fleet.base.role_maker as role_maker
 role = role_maker.PaddleCloudRoleMaker(is_collective=True)
 fleet.init(role)
@@ -133,7 +133,7 @@ class TestFleetBase(unittest.TestCase):
 fleet.run_worker()
 def test_stop_worker(self):
-import paddle.fleet as fleet
+import paddle.distributed.fleet as fleet
 import paddle.fluid.incubate.fleet.base.role_maker as role_maker
 role = role_maker.PaddleCloudRoleMaker(is_collective=True)
 fleet.init(role)
@@ -141,7 +141,7 @@ class TestFleetBase(unittest.TestCase):
 fleet.stop_worker()
 def test_distributed_optimizer(self):
-import paddle.fleet as fleet
+import paddle.distributed.fleet as fleet
 import paddle.fluid.incubate.fleet.base.role_maker as role_maker
 role = role_maker.PaddleCloudRoleMaker(is_collective=True)
 fleet.init(role)
@@ -151,7 +151,7 @@ class TestFleetBase(unittest.TestCase):
 def test_minimize(self):
 import paddle
-import paddle.fleet as fleet
+import paddle.distributed.fleet as fleet
 import paddle.fluid.incubate.fleet.base.role_maker as role_maker
 input_x = paddle.fluid.layers.data(
......
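For orientation, a minimal sketch of the calling pattern these TestFleetBase hunks exercise after the rename; it assumes the PaddleCloud environment variables (e.g. PADDLE_TRAINER_ENDPOINTS) are set, as the tests arrange:

    import paddle.distributed.fleet as fleet
    import paddle.fluid.incubate.fleet.base.role_maker as role_maker

    # a collective role resolved from the PaddleCloud environment
    role = role_maker.PaddleCloudRoleMaker(is_collective=True)
    fleet.init(role)
    fleet.barrier_worker()  # block until every worker reaches this point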
...@@ -16,7 +16,7 @@ import unittest ...@@ -16,7 +16,7 @@ import unittest
import paddle import paddle
from paddle import fluid from paddle import fluid
import os import os
import paddle.fleet as fleet import paddle.distributed.fleet as fleet
import paddle.fluid.incubate.fleet.base.role_maker as role_maker import paddle.fluid.incubate.fleet.base.role_maker as role_maker
...@@ -47,7 +47,7 @@ class TestFleetDGCOptimizer(unittest.TestCase): ...@@ -47,7 +47,7 @@ class TestFleetDGCOptimizer(unittest.TestCase):
input=prediction, label=input_y) input=prediction, label=input_y)
avg_cost = paddle.fluid.layers.mean(x=cost) avg_cost = paddle.fluid.layers.mean(x=cost)
strategy = paddle.fleet.DistributedStrategy() strategy = paddle.distributed.fleet.DistributedStrategy()
strategy.dgc = True strategy.dgc = True
strategy.dgc_configs = { strategy.dgc_configs = {
"rampup_begin_step": 128, "rampup_begin_step": 128,
......
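The DGC hunk shows the flag-plus-configs shape this commit uses for every strategy knob (the lamb, lars, and localsgd hunks below repeat it). A sketch restricted to the key visible above:

    import paddle.distributed.fleet as fleet

    strategy = fleet.DistributedStrategy()
    strategy.dgc = True  # enable deep gradient compression
    strategy.dgc_configs = {"rampup_begin_step": 128}  # key taken from the hunk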
...@@ -19,7 +19,7 @@ import os ...@@ -19,7 +19,7 @@ import os
class TestStrategyConfig(unittest.TestCase): class TestStrategyConfig(unittest.TestCase):
def test_amp(self): def test_amp(self):
strategy = paddle.fleet.DistributedStrategy() strategy = paddle.distributed.fleet.DistributedStrategy()
strategy.amp = True strategy.amp = True
self.assertEqual(strategy.amp, True) self.assertEqual(strategy.amp, True)
strategy.amp = False strategy.amp = False
...@@ -28,7 +28,7 @@ class TestStrategyConfig(unittest.TestCase): ...@@ -28,7 +28,7 @@ class TestStrategyConfig(unittest.TestCase):
self.assertEqual(strategy.amp, False) self.assertEqual(strategy.amp, False)
def test_amp_configs(self): def test_amp_configs(self):
strategy = paddle.fleet.DistributedStrategy() strategy = paddle.distributed.fleet.DistributedStrategy()
configs = { configs = {
"init_loss_scaling": 32768, "init_loss_scaling": 32768,
"decr_every_n_nan_or_inf": 2, "decr_every_n_nan_or_inf": 2,
...@@ -41,7 +41,7 @@ class TestStrategyConfig(unittest.TestCase): ...@@ -41,7 +41,7 @@ class TestStrategyConfig(unittest.TestCase):
self.assertEqual(strategy.amp_configs["init_loss_scaling"], 32768) self.assertEqual(strategy.amp_configs["init_loss_scaling"], 32768)
def test_recompute(self): def test_recompute(self):
strategy = paddle.fleet.DistributedStrategy() strategy = paddle.distributed.fleet.DistributedStrategy()
strategy.recompute = True strategy.recompute = True
self.assertEqual(strategy.recompute, True) self.assertEqual(strategy.recompute, True)
strategy.recompute = False strategy.recompute = False
...@@ -50,13 +50,13 @@ class TestStrategyConfig(unittest.TestCase): ...@@ -50,13 +50,13 @@ class TestStrategyConfig(unittest.TestCase):
self.assertEqual(strategy.recompute, False) self.assertEqual(strategy.recompute, False)
def test_recompute_configs(self): def test_recompute_configs(self):
strategy = paddle.fleet.DistributedStrategy() strategy = paddle.distributed.fleet.DistributedStrategy()
configs = {"checkpoints": ["x", "y"]} configs = {"checkpoints": ["x", "y"]}
strategy.recompute_configs = configs strategy.recompute_configs = configs
self.assertEqual(len(strategy.recompute_configs["checkpoints"]), 2) self.assertEqual(len(strategy.recompute_configs["checkpoints"]), 2)
def test_pipeline(self): def test_pipeline(self):
strategy = paddle.fleet.DistributedStrategy() strategy = paddle.distributed.fleet.DistributedStrategy()
strategy.pipeline = True strategy.pipeline = True
self.assertEqual(strategy.pipeline, True) self.assertEqual(strategy.pipeline, True)
strategy.pipeline = False strategy.pipeline = False
...@@ -65,13 +65,13 @@ class TestStrategyConfig(unittest.TestCase): ...@@ -65,13 +65,13 @@ class TestStrategyConfig(unittest.TestCase):
self.assertEqual(strategy.pipeline, False) self.assertEqual(strategy.pipeline, False)
def test_pipeline_configs(self): def test_pipeline_configs(self):
strategy = paddle.fleet.DistributedStrategy() strategy = paddle.distributed.fleet.DistributedStrategy()
configs = {"micro_batch": 4} configs = {"micro_batch": 4}
strategy.pipeline_configs = configs strategy.pipeline_configs = configs
self.assertEqual(strategy.pipeline_configs["micro_batch"], 4) self.assertEqual(strategy.pipeline_configs["micro_batch"], 4)
def test_localsgd(self): def test_localsgd(self):
strategy = paddle.fleet.DistributedStrategy() strategy = paddle.distributed.fleet.DistributedStrategy()
strategy.localsgd = True strategy.localsgd = True
self.assertEqual(strategy.localsgd, True) self.assertEqual(strategy.localsgd, True)
strategy.localsgd = False strategy.localsgd = False
...@@ -80,13 +80,13 @@ class TestStrategyConfig(unittest.TestCase): ...@@ -80,13 +80,13 @@ class TestStrategyConfig(unittest.TestCase):
self.assertEqual(strategy.localsgd, False) self.assertEqual(strategy.localsgd, False)
def test_localsgd_configs(self): def test_localsgd_configs(self):
strategy = paddle.fleet.DistributedStrategy() strategy = paddle.distributed.fleet.DistributedStrategy()
configs = {"k_steps": 4} configs = {"k_steps": 4}
strategy.localsgd_configs = configs strategy.localsgd_configs = configs
self.assertEqual(strategy.localsgd_configs["k_steps"], 4) self.assertEqual(strategy.localsgd_configs["k_steps"], 4)
def test_dgc(self): def test_dgc(self):
strategy = paddle.fleet.DistributedStrategy() strategy = paddle.distributed.fleet.DistributedStrategy()
strategy.dgc = True strategy.dgc = True
self.assertEqual(strategy.dgc, True) self.assertEqual(strategy.dgc, True)
strategy.dgc = False strategy.dgc = False
...@@ -95,7 +95,7 @@ class TestStrategyConfig(unittest.TestCase): ...@@ -95,7 +95,7 @@ class TestStrategyConfig(unittest.TestCase):
self.assertEqual(strategy.dgc, False) self.assertEqual(strategy.dgc, False)
def test_sync_nccl_allreduce(self): def test_sync_nccl_allreduce(self):
strategy = paddle.fleet.DistributedStrategy() strategy = paddle.distributed.fleet.DistributedStrategy()
strategy.sync_nccl_allreduce = True strategy.sync_nccl_allreduce = True
self.assertEqual(strategy.sync_nccl_allreduce, True) self.assertEqual(strategy.sync_nccl_allreduce, True)
strategy.sync_nccl_allreduce = False strategy.sync_nccl_allreduce = False
...@@ -104,14 +104,14 @@ class TestStrategyConfig(unittest.TestCase): ...@@ -104,14 +104,14 @@ class TestStrategyConfig(unittest.TestCase):
self.assertEqual(strategy.sync_nccl_allreduce, False) self.assertEqual(strategy.sync_nccl_allreduce, False)
def test_nccl_comm_num(self): def test_nccl_comm_num(self):
strategy = paddle.fleet.DistributedStrategy() strategy = paddle.distributed.fleet.DistributedStrategy()
strategy.nccl_comm_num = 1 strategy.nccl_comm_num = 1
self.assertEqual(strategy.nccl_comm_num, 1) self.assertEqual(strategy.nccl_comm_num, 1)
strategy.nccl_comm_num = "2" strategy.nccl_comm_num = "2"
self.assertEqual(strategy.nccl_comm_num, 1) self.assertEqual(strategy.nccl_comm_num, 1)
def test_use_hierarchical_allreduce(self): def test_use_hierarchical_allreduce(self):
strategy = paddle.fleet.DistributedStrategy() strategy = paddle.distributed.fleet.DistributedStrategy()
strategy.use_hierarchical_allreduce = True strategy.use_hierarchical_allreduce = True
self.assertEqual(strategy.use_hierarchical_allreduce, True) self.assertEqual(strategy.use_hierarchical_allreduce, True)
strategy.use_hierarchical_allreduce = False strategy.use_hierarchical_allreduce = False
...@@ -120,14 +120,14 @@ class TestStrategyConfig(unittest.TestCase): ...@@ -120,14 +120,14 @@ class TestStrategyConfig(unittest.TestCase):
self.assertEqual(strategy.use_hierarchical_allreduce, False) self.assertEqual(strategy.use_hierarchical_allreduce, False)
def test_hierarchical_allreduce_inter_nranks(self): def test_hierarchical_allreduce_inter_nranks(self):
strategy = paddle.fleet.DistributedStrategy() strategy = paddle.distributed.fleet.DistributedStrategy()
strategy.hierarchical_allreduce_inter_nranks = 8 strategy.hierarchical_allreduce_inter_nranks = 8
self.assertEqual(strategy.hierarchical_allreduce_inter_nranks, 8) self.assertEqual(strategy.hierarchical_allreduce_inter_nranks, 8)
strategy.hierarchical_allreduce_inter_nranks = "4" strategy.hierarchical_allreduce_inter_nranks = "4"
self.assertEqual(strategy.hierarchical_allreduce_inter_nranks, 8) self.assertEqual(strategy.hierarchical_allreduce_inter_nranks, 8)
def test_sync_batch_norm(self): def test_sync_batch_norm(self):
strategy = paddle.fleet.DistributedStrategy() strategy = paddle.distributed.fleet.DistributedStrategy()
strategy.sync_batch_norm = True strategy.sync_batch_norm = True
self.assertEqual(strategy.sync_batch_norm, True) self.assertEqual(strategy.sync_batch_norm, True)
strategy.sync_batch_norm = False strategy.sync_batch_norm = False
...@@ -136,7 +136,7 @@ class TestStrategyConfig(unittest.TestCase): ...@@ -136,7 +136,7 @@ class TestStrategyConfig(unittest.TestCase):
self.assertEqual(strategy.sync_batch_norm, False) self.assertEqual(strategy.sync_batch_norm, False)
def test_fuse_all_reduce_ops(self): def test_fuse_all_reduce_ops(self):
strategy = paddle.fleet.DistributedStrategy() strategy = paddle.distributed.fleet.DistributedStrategy()
strategy.fuse_all_reduce_ops = True strategy.fuse_all_reduce_ops = True
self.assertEqual(strategy.fuse_all_reduce_ops, True) self.assertEqual(strategy.fuse_all_reduce_ops, True)
strategy.fuse_all_reduce_ops = False strategy.fuse_all_reduce_ops = False
...@@ -145,21 +145,21 @@ class TestStrategyConfig(unittest.TestCase): ...@@ -145,21 +145,21 @@ class TestStrategyConfig(unittest.TestCase):
self.assertEqual(strategy.fuse_all_reduce_ops, False) self.assertEqual(strategy.fuse_all_reduce_ops, False)
def test_fuse_grad_size_in_MB(self): def test_fuse_grad_size_in_MB(self):
strategy = paddle.fleet.DistributedStrategy() strategy = paddle.distributed.fleet.DistributedStrategy()
strategy.fuse_grad_size_in_MB = 50 strategy.fuse_grad_size_in_MB = 50
self.assertEqual(strategy.fuse_grad_size_in_MB, 50) self.assertEqual(strategy.fuse_grad_size_in_MB, 50)
strategy.fuse_grad_size_in_MB = "40" strategy.fuse_grad_size_in_MB = "40"
self.assertEqual(strategy.fuse_grad_size_in_MB, 50) self.assertEqual(strategy.fuse_grad_size_in_MB, 50)
def test_fuse_grad_size_in_TFLOPS(self): def test_fuse_grad_size_in_TFLOPS(self):
strategy = paddle.fleet.DistributedStrategy() strategy = paddle.distributed.fleet.DistributedStrategy()
strategy._fuse_grad_size_in_TFLOPS = 0.1 strategy._fuse_grad_size_in_TFLOPS = 0.1
self.assertGreater(strategy._fuse_grad_size_in_TFLOPS, 0.09) self.assertGreater(strategy._fuse_grad_size_in_TFLOPS, 0.09)
strategy._fuse_grad_size_in_TFLOPS = "0.3" strategy._fuse_grad_size_in_TFLOPS = "0.3"
self.assertGreater(strategy._fuse_grad_size_in_TFLOPS, 0.09) self.assertGreater(strategy._fuse_grad_size_in_TFLOPS, 0.09)
def test_gradient_merge(self): def test_gradient_merge(self):
strategy = paddle.fleet.DistributedStrategy() strategy = paddle.distributed.fleet.DistributedStrategy()
strategy.gradient_merge = True strategy.gradient_merge = True
self.assertEqual(strategy.gradient_merge, True) self.assertEqual(strategy.gradient_merge, True)
strategy.gradient_merge = False strategy.gradient_merge = False
...@@ -168,13 +168,13 @@ class TestStrategyConfig(unittest.TestCase): ...@@ -168,13 +168,13 @@ class TestStrategyConfig(unittest.TestCase):
self.assertEqual(strategy.gradient_merge, False) self.assertEqual(strategy.gradient_merge, False)
def test_gradient_merge_configs(self): def test_gradient_merge_configs(self):
strategy = paddle.fleet.DistributedStrategy() strategy = paddle.distributed.fleet.DistributedStrategy()
configs = {"k_steps": 4} configs = {"k_steps": 4}
strategy.gradient_merge_configs = configs strategy.gradient_merge_configs = configs
self.assertEqual(strategy.gradient_merge_configs["k_steps"], 4) self.assertEqual(strategy.gradient_merge_configs["k_steps"], 4)
def test_lars(self): def test_lars(self):
strategy = paddle.fleet.DistributedStrategy() strategy = paddle.distributed.fleet.DistributedStrategy()
strategy.lars = True strategy.lars = True
self.assertEqual(strategy.lars, True) self.assertEqual(strategy.lars, True)
strategy.lars = False strategy.lars = False
...@@ -183,7 +183,7 @@ class TestStrategyConfig(unittest.TestCase): ...@@ -183,7 +183,7 @@ class TestStrategyConfig(unittest.TestCase):
self.assertEqual(strategy.lars, False) self.assertEqual(strategy.lars, False)
def test_lamb(self): def test_lamb(self):
strategy = paddle.fleet.DistributedStrategy() strategy = paddle.distributed.fleet.DistributedStrategy()
strategy.lamb = True strategy.lamb = True
self.assertEqual(strategy.lamb, True) self.assertEqual(strategy.lamb, True)
strategy.lamb = False strategy.lamb = False
...@@ -192,7 +192,7 @@ class TestStrategyConfig(unittest.TestCase): ...@@ -192,7 +192,7 @@ class TestStrategyConfig(unittest.TestCase):
self.assertEqual(strategy.lamb, False) self.assertEqual(strategy.lamb, False)
def test_a_sync(self): def test_a_sync(self):
strategy = paddle.fleet.DistributedStrategy() strategy = paddle.distributed.fleet.DistributedStrategy()
strategy.a_sync = True strategy.a_sync = True
self.assertEqual(strategy.a_sync, True) self.assertEqual(strategy.a_sync, True)
strategy.a_sync = False strategy.a_sync = False
...@@ -202,13 +202,13 @@ class TestStrategyConfig(unittest.TestCase): ...@@ -202,13 +202,13 @@ class TestStrategyConfig(unittest.TestCase):
strategy.a_sync = "True" strategy.a_sync = "True"
def test_a_sync_configs(self): def test_a_sync_configs(self):
strategy = paddle.fleet.DistributedStrategy() strategy = paddle.distributed.fleet.DistributedStrategy()
configs = {"k_steps": 1000} configs = {"k_steps": 1000}
strategy.a_sync_configs = configs strategy.a_sync_configs = configs
self.assertEqual(strategy.a_sync_configs["k_steps"], 1000) self.assertEqual(strategy.a_sync_configs["k_steps"], 1000)
def test_elastic(self): def test_elastic(self):
strategy = paddle.fleet.DistributedStrategy() strategy = paddle.distributed.fleet.DistributedStrategy()
strategy.elastic = True strategy.elastic = True
self.assertEqual(strategy.elastic, True) self.assertEqual(strategy.elastic, True)
strategy.elastic = False strategy.elastic = False
...@@ -217,7 +217,7 @@ class TestStrategyConfig(unittest.TestCase): ...@@ -217,7 +217,7 @@ class TestStrategyConfig(unittest.TestCase):
self.assertEqual(strategy.elastic, False) self.assertEqual(strategy.elastic, False)
def test_auto(self): def test_auto(self):
strategy = paddle.fleet.DistributedStrategy() strategy = paddle.distributed.fleet.DistributedStrategy()
strategy.auto = True strategy.auto = True
self.assertEqual(strategy.auto, True) self.assertEqual(strategy.auto, True)
strategy.auto = False strategy.auto = False
...@@ -226,7 +226,7 @@ class TestStrategyConfig(unittest.TestCase): ...@@ -226,7 +226,7 @@ class TestStrategyConfig(unittest.TestCase):
self.assertEqual(strategy.auto, False) self.assertEqual(strategy.auto, False)
def test_strategy_prototxt(self): def test_strategy_prototxt(self):
strategy = paddle.fleet.DistributedStrategy() strategy = paddle.distributed.fleet.DistributedStrategy()
strategy.a_sync = True strategy.a_sync = True
strategy.localsgd = True strategy.localsgd = True
strategy.dgc = True strategy.dgc = True
...@@ -255,7 +255,7 @@ class TestStrategyConfig(unittest.TestCase): ...@@ -255,7 +255,7 @@ class TestStrategyConfig(unittest.TestCase):
exe_strategy.num_iteration_per_run = 10 exe_strategy.num_iteration_per_run = 10
strategy.execution_strategy = exe_strategy strategy.execution_strategy = exe_strategy
strategy.save_to_prototxt("dist_strategy.prototxt") strategy.save_to_prototxt("dist_strategy.prototxt")
strategy2 = paddle.fleet.DistributedStrategy() strategy2 = paddle.distributed.fleet.DistributedStrategy()
strategy2.load_from_prototxt("dist_strategy.prototxt") strategy2.load_from_prototxt("dist_strategy.prototxt")
self.assertEqual(strategy.dgc, strategy2.dgc) self.assertEqual(strategy.dgc, strategy2.dgc)
...@@ -277,7 +277,7 @@ class TestStrategyConfig(unittest.TestCase): ...@@ -277,7 +277,7 @@ class TestStrategyConfig(unittest.TestCase):
build_strategy.enable_backward_optimizer_op_deps = True build_strategy.enable_backward_optimizer_op_deps = True
build_strategy.trainers_endpoints = ["1", "2"] build_strategy.trainers_endpoints = ["1", "2"]
strategy = paddle.fleet.DistributedStrategy() strategy = paddle.distributed.fleet.DistributedStrategy()
strategy.build_strategy = build_strategy strategy.build_strategy = build_strategy
def test_execution_strategy(self): def test_execution_strategy(self):
...@@ -286,7 +286,7 @@ class TestStrategyConfig(unittest.TestCase): ...@@ -286,7 +286,7 @@ class TestStrategyConfig(unittest.TestCase):
exe_strategy.num_iteration_per_drop_scope = 10 exe_strategy.num_iteration_per_drop_scope = 10
exe_strategy.num_iteration_per_run = 10 exe_strategy.num_iteration_per_run = 10
strategy = paddle.fleet.DistributedStrategy() strategy = paddle.distributed.fleet.DistributedStrategy()
strategy.execution_strategy = exe_strategy strategy.execution_strategy = exe_strategy
......
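The prototxt tests above imply a serialization round trip for a configured strategy; a sketch, assuming the working directory is writable:

    import paddle.distributed.fleet as fleet

    strategy = fleet.DistributedStrategy()
    strategy.dgc = True
    strategy.save_to_prototxt("dist_strategy.prototxt")

    strategy2 = fleet.DistributedStrategy()
    strategy2.load_from_prototxt("dist_strategy.prototxt")
    assert strategy2.dgc == strategy.dgc  # settings survive the round trip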
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
import unittest import unittest
import paddle import paddle
import os import os
import paddle.fleet as fleet import paddle.distributed.fleet as fleet
import paddle.fluid.incubate.fleet.base.role_maker as role_maker import paddle.fluid.incubate.fleet.base.role_maker as role_maker
...@@ -41,7 +41,7 @@ class TestFleetGradientMergeMetaOptimizer(unittest.TestCase): ...@@ -41,7 +41,7 @@ class TestFleetGradientMergeMetaOptimizer(unittest.TestCase):
input=prediction, label=input_y) input=prediction, label=input_y)
avg_cost = paddle.fluid.layers.mean(x=cost) avg_cost = paddle.fluid.layers.mean(x=cost)
strategy = paddle.fleet.DistributedStrategy() strategy = paddle.distributed.fleet.DistributedStrategy()
strategy.gradient_merge = True strategy.gradient_merge = True
strategy.gradient_merge_configs = {"k_steps": 2, "avg": True} strategy.gradient_merge_configs = {"k_steps": 2, "avg": True}
optimizer = paddle.optimizer.SGD(learning_rate=0.01) optimizer = paddle.optimizer.SGD(learning_rate=0.01)
......
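Gradient merge uses the same shape, with the two keys the test sets:

    import paddle.distributed.fleet as fleet

    strategy = fleet.DistributedStrategy()
    strategy.gradient_merge = True
    # accumulate gradients over 2 steps and average them before applying
    strategy.gradient_merge_configs = {"k_steps": 2, "avg": True}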
...@@ -39,7 +39,7 @@ class TestFleetGraphExecutionMetaOptimizer(unittest.TestCase): ...@@ -39,7 +39,7 @@ class TestFleetGraphExecutionMetaOptimizer(unittest.TestCase):
} }
def node_func(): def node_func():
import paddle.fleet as fleet import paddle.distributed.fleet as fleet
import paddle.fluid.incubate.fleet.base.role_maker as role_maker import paddle.fluid.incubate.fleet.base.role_maker as role_maker
role = role_maker.PaddleCloudRoleMaker(is_collective=True) role = role_maker.PaddleCloudRoleMaker(is_collective=True)
fleet.init(role) fleet.init(role)
...@@ -57,7 +57,7 @@ class TestFleetGraphExecutionMetaOptimizer(unittest.TestCase): ...@@ -57,7 +57,7 @@ class TestFleetGraphExecutionMetaOptimizer(unittest.TestCase):
input=prediction, label=input_y) input=prediction, label=input_y)
avg_cost = paddle.fluid.layers.mean(x=cost) avg_cost = paddle.fluid.layers.mean(x=cost)
strategy = paddle.fleet.DistributedStrategy() strategy = paddle.distributed.fleet.DistributedStrategy()
optimizer = paddle.optimizer.SGD(learning_rate=0.01) optimizer = paddle.optimizer.SGD(learning_rate=0.01)
optimizer = fleet.distributed_optimizer( optimizer = fleet.distributed_optimizer(
optimizer, strategy=strategy) optimizer, strategy=strategy)
...@@ -90,7 +90,7 @@ class TestFleetGraphExecutionMetaOptimizer(unittest.TestCase): ...@@ -90,7 +90,7 @@ class TestFleetGraphExecutionMetaOptimizer(unittest.TestCase):
} }
def node_func(): def node_func():
import paddle.fleet as fleet import paddle.distributed.fleet as fleet
import paddle.fluid.incubate.fleet.base.role_maker as role_maker import paddle.fluid.incubate.fleet.base.role_maker as role_maker
role = role_maker.PaddleCloudRoleMaker(is_collective=True) role = role_maker.PaddleCloudRoleMaker(is_collective=True)
fleet.init(role) fleet.init(role)
...@@ -108,7 +108,7 @@ class TestFleetGraphExecutionMetaOptimizer(unittest.TestCase): ...@@ -108,7 +108,7 @@ class TestFleetGraphExecutionMetaOptimizer(unittest.TestCase):
input=prediction, label=input_y) input=prediction, label=input_y)
avg_cost = paddle.fluid.layers.mean(x=cost) avg_cost = paddle.fluid.layers.mean(x=cost)
strategy = paddle.fleet.DistributedStrategy() strategy = paddle.distributed.fleet.DistributedStrategy()
strategy.nccl_comm_num = 2 strategy.nccl_comm_num = 2
strategy.sync_nccl_allreduce = True strategy.sync_nccl_allreduce = True
optimizer = paddle.optimizer.SGD(learning_rate=0.01) optimizer = paddle.optimizer.SGD(learning_rate=0.01)
......
...@@ -39,7 +39,7 @@ class TestFleetGraphExecutionMetaOptimizer(unittest.TestCase): ...@@ -39,7 +39,7 @@ class TestFleetGraphExecutionMetaOptimizer(unittest.TestCase):
} }
def node_func(): def node_func():
import paddle.fleet as fleet import paddle.distributed.fleet as fleet
import paddle.fluid.incubate.fleet.base.role_maker as role_maker import paddle.fluid.incubate.fleet.base.role_maker as role_maker
role = role_maker.PaddleCloudRoleMaker(is_collective=True) role = role_maker.PaddleCloudRoleMaker(is_collective=True)
fleet.init(role) fleet.init(role)
...@@ -57,7 +57,7 @@ class TestFleetGraphExecutionMetaOptimizer(unittest.TestCase): ...@@ -57,7 +57,7 @@ class TestFleetGraphExecutionMetaOptimizer(unittest.TestCase):
input=prediction, label=input_y) input=prediction, label=input_y)
avg_cost = paddle.fluid.layers.mean(x=cost) avg_cost = paddle.fluid.layers.mean(x=cost)
strategy = paddle.fleet.DistributedStrategy() strategy = paddle.distributed.fleet.DistributedStrategy()
strategy.nccl_comm_num = 2 strategy.nccl_comm_num = 2
strategy.sync_nccl_allreduce = True strategy.sync_nccl_allreduce = True
optimizer = paddle.optimizer.SGD(learning_rate=0.01) optimizer = paddle.optimizer.SGD(learning_rate=0.01)
......
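Both graph-execution hunks pair the NCCL settings with a wrapped optimizer; a sketch of that pairing (fleet.init(role) must already have run, as in node_func above):

    import paddle
    import paddle.distributed.fleet as fleet

    strategy = fleet.DistributedStrategy()
    strategy.nccl_comm_num = 2           # use two NCCL communicators
    strategy.sync_nccl_allreduce = True  # synchronize each allreduce

    optimizer = paddle.optimizer.SGD(learning_rate=0.01)
    optimizer = fleet.distributed_optimizer(optimizer, strategy=strategy)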
...@@ -16,7 +16,7 @@ import unittest ...@@ -16,7 +16,7 @@ import unittest
import paddle import paddle
from paddle import fluid from paddle import fluid
import os import os
import paddle.fleet as fleet import paddle.distributed.fleet as fleet
import paddle.fluid.incubate.fleet.base.role_maker as role_maker import paddle.fluid.incubate.fleet.base.role_maker as role_maker
...@@ -47,7 +47,7 @@ class TestFleetLambMetaOptimizer(unittest.TestCase): ...@@ -47,7 +47,7 @@ class TestFleetLambMetaOptimizer(unittest.TestCase):
input=prediction, label=input_y) input=prediction, label=input_y)
avg_cost = paddle.fluid.layers.mean(x=cost) avg_cost = paddle.fluid.layers.mean(x=cost)
strategy = paddle.fleet.DistributedStrategy() strategy = paddle.distributed.fleet.DistributedStrategy()
strategy.lamb = True strategy.lamb = True
strategy.lamb_configs = { strategy.lamb_configs = {
'lamb_weight_decay': 0.01, 'lamb_weight_decay': 0.01,
......
...@@ -16,7 +16,7 @@ import unittest ...@@ -16,7 +16,7 @@ import unittest
import paddle import paddle
from paddle import fluid from paddle import fluid
import os import os
import paddle.fleet as fleet import paddle.distributed.fleet as fleet
import paddle.fluid.incubate.fleet.base.role_maker as role_maker import paddle.fluid.incubate.fleet.base.role_maker as role_maker
...@@ -47,7 +47,7 @@ class TestFleetLarsMetaOptimizer(unittest.TestCase): ...@@ -47,7 +47,7 @@ class TestFleetLarsMetaOptimizer(unittest.TestCase):
input=prediction, label=input_y) input=prediction, label=input_y)
avg_cost = paddle.fluid.layers.mean(x=cost) avg_cost = paddle.fluid.layers.mean(x=cost)
strategy = paddle.fleet.DistributedStrategy() strategy = paddle.distributed.fleet.DistributedStrategy()
strategy.lars = True strategy.lars = True
strategy.lars_configs = { strategy.lars_configs = {
"lars_coeff": 0.001, "lars_coeff": 0.001,
......
...@@ -16,7 +16,7 @@ import unittest ...@@ -16,7 +16,7 @@ import unittest
import paddle import paddle
import os import os
import paddle.fleet as fleet import paddle.distributed.fleet as fleet
import paddle.fluid.incubate.fleet.base.role_maker as role_maker import paddle.fluid.incubate.fleet.base.role_maker as role_maker
...@@ -39,7 +39,7 @@ class TestFleetLocalSGDMetaOptimizer(unittest.TestCase): ...@@ -39,7 +39,7 @@ class TestFleetLocalSGDMetaOptimizer(unittest.TestCase):
input=prediction, label=input_y) input=prediction, label=input_y)
avg_cost = paddle.fluid.layers.mean(x=cost) avg_cost = paddle.fluid.layers.mean(x=cost)
strategy = paddle.fleet.DistributedStrategy() strategy = paddle.distributed.fleet.DistributedStrategy()
strategy.localsgd = True strategy.localsgd = True
strategy.auto = True strategy.auto = True
config = strategy.localsgd_configs config = strategy.localsgd_configs
......
...@@ -19,7 +19,7 @@ import paddle ...@@ -19,7 +19,7 @@ import paddle
import paddle.fluid as fluid import paddle.fluid as fluid
import os import os
import unittest import unittest
import paddle.fleet.metrics.metric as metric import paddle.distributed.fleet.metrics.metric as metric
from paddle.fluid.incubate.fleet.parameter_server.distribute_transpiler import fleet from paddle.fluid.incubate.fleet.parameter_server.distribute_transpiler import fleet
......
...@@ -24,7 +24,7 @@ class TestFleetMetaOptimizer(unittest.TestCase): ...@@ -24,7 +24,7 @@ class TestFleetMetaOptimizer(unittest.TestCase):
"PADDLE_TRAINER_ENDPOINTS"] = "127.0.0.1:36001,127.0.0.1:36002" "PADDLE_TRAINER_ENDPOINTS"] = "127.0.0.1:36001,127.0.0.1:36002"
def test_pipeline_optimizer(self): def test_pipeline_optimizer(self):
import paddle.fleet as fleet import paddle.distributed.fleet as fleet
import paddle.fluid.incubate.fleet.base.role_maker as role_maker import paddle.fluid.incubate.fleet.base.role_maker as role_maker
role = role_maker.PaddleCloudRoleMaker(is_collective=True) role = role_maker.PaddleCloudRoleMaker(is_collective=True)
fleet.init(role) fleet.init(role)
...@@ -49,7 +49,7 @@ class TestFleetMetaOptimizer(unittest.TestCase): ...@@ -49,7 +49,7 @@ class TestFleetMetaOptimizer(unittest.TestCase):
input=prediction, label=input_y) input=prediction, label=input_y)
avg_cost = paddle.fluid.layers.mean(x=cost) avg_cost = paddle.fluid.layers.mean(x=cost)
strategy = paddle.fleet.DistributedStrategy() strategy = paddle.distributed.fleet.DistributedStrategy()
strategy.pipeline = True strategy.pipeline = True
strategy.pipeline_configs = {'micro_batch': 2} strategy.pipeline_configs = {'micro_batch': 2}
......
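Pipeline parallelism is switched on the same way; a sketch limited to the single config key the test sets:

    import paddle.distributed.fleet as fleet

    strategy = fleet.DistributedStrategy()
    strategy.pipeline = True
    strategy.pipeline_configs = {'micro_batch': 2}  # the only key the test sets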
...@@ -36,7 +36,7 @@ class TestFleetPrivateFunction(unittest.TestCase): ...@@ -36,7 +36,7 @@ class TestFleetPrivateFunction(unittest.TestCase):
thr = threading.Thread(target=init_server, args=(9292, )) thr = threading.Thread(target=init_server, args=(9292, ))
thr.start() thr.start()
import paddle.fleet as fleet import paddle.distributed.fleet as fleet
ep = ["127.0.0.1:9292"] ep = ["127.0.0.1:9292"]
fleet.base.private_helper_function.wait_server_ready(ep) fleet.base.private_helper_function.wait_server_ready(ep)
......
...@@ -26,7 +26,7 @@ class TestFleetRecomputeMetaOptimizer(unittest.TestCase): ...@@ -26,7 +26,7 @@ class TestFleetRecomputeMetaOptimizer(unittest.TestCase):
"127.0.0.1:36001,127.0.0.2:36001" "127.0.0.1:36001,127.0.0.2:36001"
def test_recompute_optimizer(self): def test_recompute_optimizer(self):
import paddle.fleet as fleet import paddle.distributed.fleet as fleet
import paddle.fluid.incubate.fleet.base.role_maker as role_maker import paddle.fluid.incubate.fleet.base.role_maker as role_maker
role = role_maker.PaddleCloudRoleMaker(is_collective=True) role = role_maker.PaddleCloudRoleMaker(is_collective=True)
fleet.init(role) fleet.init(role)
...@@ -41,7 +41,7 @@ class TestFleetRecomputeMetaOptimizer(unittest.TestCase): ...@@ -41,7 +41,7 @@ class TestFleetRecomputeMetaOptimizer(unittest.TestCase):
input=prediction, label=input_y) input=prediction, label=input_y)
avg_cost = paddle.fluid.layers.mean(x=cost) avg_cost = paddle.fluid.layers.mean(x=cost)
strategy = paddle.fleet.DistributedStrategy() strategy = paddle.distributed.fleet.DistributedStrategy()
strategy.recompute = True strategy.recompute = True
strategy.recompute_configs = {"checkpoints": ["fc2"]} strategy.recompute_configs = {"checkpoints": ["fc2"]}
......
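Recompute trades memory for compute by re-running forward segments in the backward pass; the checkpoints list names the activations to keep, here the test network's 'fc2' output:

    import paddle.distributed.fleet as fleet

    strategy = fleet.DistributedStrategy()
    strategy.recompute = True
    # activations not listed as checkpoints are recomputed rather than stored
    strategy.recompute_configs = {"checkpoints": ["fc2"]}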
...@@ -163,7 +163,7 @@ class TestCloudRoleMaker2(unittest.TestCase): ...@@ -163,7 +163,7 @@ class TestCloudRoleMaker2(unittest.TestCase):
data = "1 1 1 1\n" data = "1 1 1 1\n"
f.write(data) f.write(data)
dataset = paddle.fleet.DatasetFactory().create_dataset( dataset = paddle.distributed.fleet.DatasetFactory().create_dataset(
"InMemoryDataset") "InMemoryDataset")
dataset.set_filelist(["test_fleet_gloo_role_maker_1.txt"]) dataset.set_filelist(["test_fleet_gloo_role_maker_1.txt"])
dataset.set_use_var([show, label]) dataset.set_use_var([show, label])
......
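A sketch of the dataset path after the rename, combining the calls visible in this hunk and in the TestDatasetWithStat hunk further down; the two slot variables mirror the ones the tests create:

    import paddle.fluid as fluid
    import paddle.distributed.fleet as fleet

    show = fluid.layers.data(name="show", shape=[1], dtype="int64", lod_level=1)
    label = fluid.layers.data(name="label", shape=[1], dtype="int64", lod_level=1)

    dataset = fleet.DatasetFactory().create_dataset("InMemoryDataset")
    dataset.set_batch_size(32)
    dataset.set_filelist(["test_fleet_gloo_role_maker_1.txt"])
    dataset.set_use_var([show, label])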
...@@ -40,9 +40,9 @@ class TestCloudRoleMaker(unittest.TestCase): ...@@ -40,9 +40,9 @@ class TestCloudRoleMaker(unittest.TestCase):
from paddle.fluid.incubate.fleet.parameter_server.pslib import PSLib from paddle.fluid.incubate.fleet.parameter_server.pslib import PSLib
from paddle.fluid.incubate.fleet.base.role_maker import \ from paddle.fluid.incubate.fleet.base.role_maker import \
GeneralRoleMaker GeneralRoleMaker
from paddle.fleet.utils import KVHandler from paddle.distributed.fleet.utils import KVHandler
from paddle.fleet.utils import KVServer from paddle.distributed.fleet.utils import KVServer
from paddle.fleet.utils import KVHTTPServer from paddle.distributed.fleet.utils import KVHTTPServer
except: except:
print("warning: no fleet, skip test_pslib_4") print("warning: no fleet, skip test_pslib_4")
return return
......
...@@ -16,7 +16,7 @@ ...@@ -16,7 +16,7 @@
from __future__ import print_function from __future__ import print_function
import os import os
import unittest import unittest
import paddle.fleet.base.role_maker as role_maker import paddle.distributed.fleet.base.role_maker as role_maker
class TestRoleMakerBase(unittest.TestCase): class TestRoleMakerBase(unittest.TestCase):
......
...@@ -19,16 +19,17 @@ import os ...@@ -19,16 +19,17 @@ import os
class TestFleetRuntime(unittest.TestCase): class TestFleetRuntime(unittest.TestCase):
def test_fleet_runtime_base(self): def test_fleet_runtime_base(self):
import paddle.fleet.runtime import paddle.distributed.fleet.runtime
base = paddle.fleet.runtime.runtime_base.RuntimeBase() base = paddle.distributed.fleet.runtime.runtime_base.RuntimeBase()
base._run_worker() base._run_worker()
base._init_server() base._init_server()
base._run_server() base._run_server()
base._stop_worker() base._stop_worker()
def test_fleet_collective_runtime(self): def test_fleet_collective_runtime(self):
import paddle.fleet.runtime import paddle.distributed.fleet.runtime
collective_runtime = paddle.fleet.runtime.CollectiveRuntime() collective_runtime = paddle.distributed.fleet.runtime.CollectiveRuntime(
)
collective_runtime._init_worker() collective_runtime._init_worker()
collective_runtime._run_worker() collective_runtime._run_worker()
collective_runtime._init_worker() collective_runtime._init_worker()
......
...@@ -22,8 +22,8 @@ import tempfile ...@@ -22,8 +22,8 @@ import tempfile
import os import os
import sys import sys
from paddle.dataset.common import download, DATA_HOME from paddle.dataset.common import download, DATA_HOME
from paddle.fleet.base.util_factory import fleet_util from paddle.distributed.fleet.base.util_factory import fleet_util
import paddle.fleet.base.role_maker as role_maker import paddle.distributed.fleet.base.role_maker as role_maker
class TestFleetUtil(unittest.TestCase): class TestFleetUtil(unittest.TestCase):
...@@ -34,7 +34,7 @@ class TestFleetUtil(unittest.TestCase): ...@@ -34,7 +34,7 @@ class TestFleetUtil(unittest.TestCase):
train_dir = os.path.join("fleet_util_data", "train_program") train_dir = os.path.join("fleet_util_data", "train_program")
def test_util_base(self): def test_util_base(self):
import paddle.fleet as fleet import paddle.distributed.fleet as fleet
util = fleet.UtilBase() util = fleet.UtilBase()
strategy = fleet.DistributedStrategy() strategy = fleet.DistributedStrategy()
util._set_strategy(strategy) util._set_strategy(strategy)
...@@ -42,7 +42,7 @@ class TestFleetUtil(unittest.TestCase): ...@@ -42,7 +42,7 @@ class TestFleetUtil(unittest.TestCase):
util._set_role_maker(role_maker) util._set_role_maker(role_maker)
def test_util_factory(self): def test_util_factory(self):
import paddle.fleet as fleet import paddle.distributed.fleet as fleet
factory = fleet.base.util_factory.UtilFactory() factory = fleet.base.util_factory.UtilFactory()
strategy = fleet.DistributedStrategy() strategy = fleet.DistributedStrategy()
role_maker = None # should be fleet.PaddleCloudRoleMaker() role_maker = None # should be fleet.PaddleCloudRoleMaker()
...@@ -55,7 +55,7 @@ class TestFleetUtil(unittest.TestCase): ...@@ -55,7 +55,7 @@ class TestFleetUtil(unittest.TestCase):
self.assertEqual(util.role_maker, None) self.assertEqual(util.role_maker, None)
def test_get_util(self): def test_get_util(self):
import paddle.fleet as fleet import paddle.distributed.fleet as fleet
import paddle.fluid.incubate.fleet.base.role_maker as role_maker import paddle.fluid.incubate.fleet.base.role_maker as role_maker
role = role_maker.PaddleCloudRoleMaker(is_collective=True) role = role_maker.PaddleCloudRoleMaker(is_collective=True)
fleet.init(role) fleet.init(role)
...@@ -63,7 +63,7 @@ class TestFleetUtil(unittest.TestCase): ...@@ -63,7 +63,7 @@ class TestFleetUtil(unittest.TestCase):
self.assertEqual(default_util, None) self.assertEqual(default_util, None)
def test_set_user_defined_util(self): def test_set_user_defined_util(self):
import paddle.fleet as fleet import paddle.distributed.fleet as fleet
class UserDefinedUtil(fleet.UtilBase): class UserDefinedUtil(fleet.UtilBase):
def __init__(self): def __init__(self):
...@@ -81,7 +81,7 @@ class TestFleetUtil(unittest.TestCase): ...@@ -81,7 +81,7 @@ class TestFleetUtil(unittest.TestCase):
self.assertEqual(user_id, 10) self.assertEqual(user_id, 10)
def test_fs(self): def test_fs(self):
from paddle.fleet.utils import LocalFS from paddle.distributed.fleet.utils import LocalFS
fs = LocalFS() fs = LocalFS()
dirs, files = fs.ls_dir("test_tmp") dirs, files = fs.ls_dir("test_tmp")
dirs, files = fs.ls_dir("./") dirs, files = fs.ls_dir("./")
......
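The filesystem helpers move with the package; a minimal LocalFS sketch using the calls from test_fs:

    from paddle.distributed.fleet.utils import LocalFS

    fs = LocalFS()
    dirs, files = fs.ls_dir("./")  # sub-directories and files, returned separately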
...@@ -20,7 +20,7 @@ import os ...@@ -20,7 +20,7 @@ import os
import sys import sys
import inspect import inspect
from paddle.fleet.utils import LocalFS, FS, HDFSClient, FSTimeOut, FSFileExistsError, FSFileNotExistsError from paddle.distributed.fleet.utils import LocalFS, FS, HDFSClient, FSTimeOut, FSFileExistsError, FSFileNotExistsError
class FSTest(unittest.TestCase): class FSTest(unittest.TestCase):
......
...@@ -19,7 +19,7 @@ from paddle.fluid.incubate.fleet.collective import CollectiveOptimizer, fleet ...@@ -19,7 +19,7 @@ from paddle.fluid.incubate.fleet.collective import CollectiveOptimizer, fleet
import os import os
import sys import sys
from paddle.fleet.utils import LocalFS, HDFSClient, FSTimeOut, FSFileExistsError, FSFileNotExistsError from paddle.distributed.fleet.utils import LocalFS, HDFSClient, FSTimeOut, FSFileExistsError, FSFileNotExistsError
java_home = os.environ["JAVA_HOME"] java_home = os.environ["JAVA_HOME"]
......
...@@ -52,7 +52,7 @@ class TestDatasetWithStat(unittest.TestCase): ...@@ -52,7 +52,7 @@ class TestDatasetWithStat(unittest.TestCase):
name=slot, shape=[1], dtype="int64", lod_level=1) name=slot, shape=[1], dtype="int64", lod_level=1)
slots_vars.append(var) slots_vars.append(var)
dataset = paddle.fleet.DatasetFactory().create_dataset( dataset = paddle.distributed.fleet.DatasetFactory().create_dataset(
"InMemoryDataset") "InMemoryDataset")
dataset.set_batch_size(32) dataset.set_batch_size(32)
dataset.set_thread(3) dataset.set_thread(3)
......
...@@ -145,14 +145,14 @@ packages=['paddle', ...@@ -145,14 +145,14 @@ packages=['paddle',
'paddle.incubate', 'paddle.incubate',
'paddle.incubate.complex', 'paddle.incubate.complex',
'paddle.incubate.complex.tensor', 'paddle.incubate.complex.tensor',
'paddle.fleet', 'paddle.distributed.fleet',
'paddle.fleet.base', 'paddle.distributed.fleet.base',
'paddle.fleet.meta_optimizers', 'paddle.distributed.fleet.meta_optimizers',
'paddle.fleet.runtime', 'paddle.distributed.fleet.runtime',
'paddle.fleet.dataset', 'paddle.distributed.fleet.dataset',
'paddle.fleet.metrics', 'paddle.distributed.fleet.metrics',
'paddle.fleet.proto', 'paddle.distributed.fleet.proto',
'paddle.fleet.utils', 'paddle.distributed.fleet.utils',
'paddle.framework', 'paddle.framework',
'paddle.jit', 'paddle.jit',
'paddle.fluid', 'paddle.fluid',
...@@ -482,7 +482,7 @@ with redirect_stdout(): ...@@ -482,7 +482,7 @@ with redirect_stdout():
}, },
entry_points={ entry_points={
'console_scripts': [ 'console_scripts': [
'fleetrun = paddle.fleet.launch:launch' 'fleetrun = paddle.distributed.fleet.launch:launch'
] ]
} }
) )
......
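The entry_points hunk wires the fleetrun console script to the relocated launch module, so running fleetrun on the command line resolves to the same callable as:

    # what the fleetrun console script invokes after this commit
    from paddle.distributed.fleet.launch import launch

    launch()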