Unverified commit 50a5bcfc, authored by Dong Daxiang, committed by GitHub

【paddle.fleet】paddle.fleet -> paddle.distributed.fleet. (#26186)

* move paddle.fleet to paddle.distributed.fleet
Parent ffe52b44
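This commit is a pure package relocation: the fleet API keeps its names, only the import path moves from `paddle.fleet` to `paddle.distributed.fleet`. A minimal sketch of a call site updated to the new path, assuming a collective (GPU) training environment; it mirrors the docstring examples changed later in this diff:

    import paddle
    import paddle.distributed.fleet as fleet

    # role maker and init keep their old names, only the package moved
    role = fleet.role_maker.PaddleCloudRoleMaker(is_collective=True)
    fleet.init(role)

    strategy = fleet.DistributedStrategy()
    optimizer = paddle.optimizer.SGD(learning_rate=0.001)
    optimizer = fleet.distributed_optimizer(optimizer, strategy=strategy)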
......@@ -164,23 +164,23 @@ if(WITH_PYTHON)
if (NOT WIN32)
add_custom_command(TARGET framework_py_proto POST_BUILD
COMMAND ${CMAKE_COMMAND} -E make_directory ${PADDLE_BINARY_DIR}/python/paddle/fluid/proto
COMMAND ${CMAKE_COMMAND} -E make_directory ${PADDLE_BINARY_DIR}/python/paddle/fleet/proto
COMMAND ${CMAKE_COMMAND} -E touch ${PADDLE_BINARY_DIR}/python/paddle/fleet/proto/__init__.py
COMMAND ${CMAKE_COMMAND} -E make_directory ${PADDLE_BINARY_DIR}/python/paddle/distributed/fleet/proto
COMMAND ${CMAKE_COMMAND} -E touch ${PADDLE_BINARY_DIR}/python/paddle/distributed/fleet/proto/__init__.py
COMMAND cp *.py ${PADDLE_BINARY_DIR}/python/paddle/fluid/proto/
COMMAND cp distributed_strategy_*.py ${PADDLE_BINARY_DIR}/python/paddle/fleet/proto
COMMAND cp distributed_strategy_*.py ${PADDLE_BINARY_DIR}/python/paddle/distributed/fleet/proto
COMMENT "Copy generated python proto into directory paddle/fluid/proto."
WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR})
else(NOT WIN32)
string(REPLACE "/" "\\" proto_dstpath "${PADDLE_BINARY_DIR}/python/paddle/fluid/proto/")
string(REPLACE "/" "\\" fleet_proto_dstpath "${PADDLE_BINARY_DIR}/python/paddle/fleet/proto/")
string(REPLACE "/" "\\" fleet_proto_dstpath "${PADDLE_BINARY_DIR}/python/paddle/distributed/fleet/proto/")
add_custom_command(TARGET framework_py_proto POST_BUILD
COMMAND ${CMAKE_COMMAND} -E make_directory ${PADDLE_BINARY_DIR}/python/paddle/fluid/proto
COMMAND ${CMAKE_COMMAND} -E make_directory ${PADDLE_BINARY_DIR}/python/paddle/fleet/proto
COMMAND ${CMAKE_COMMAND} -E touch ${PADDLE_BINARY_DIR}/python/paddle/fleet/proto/__init__.py
COMMAND ${CMAKE_COMMAND} -E make_directory ${PADDLE_BINARY_DIR}/python/paddle/distributed/fleet/proto
COMMAND ${CMAKE_COMMAND} -E touch ${PADDLE_BINARY_DIR}/python/paddle/distributed/fleet/proto/__init__.py
COMMAND copy /Y *.py ${proto_dstpath}
COMMAND copy /Y distributed_strategy_*.py ${fleet_proto_dstpath}
COMMENT "Copy generated python proto into directory paddle/fluid/proto."
COMMENT "Copy generated python proto into directory paddle/fleet/proto."
COMMENT "Copy generated python proto into directory paddle/distributed/fleet/proto."
WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR})
endif(NOT WIN32)
endif()
......
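The CMake rule above copies the generated `distributed_strategy_*.py` protos into the new `paddle/distributed/fleet/proto` package directory. For reference, the Python side then consumes them with the import that appears later in this diff (a sketch; it assumes the build step has already run):

    from paddle.distributed.fleet.proto import distributed_strategy_pb2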
......@@ -36,7 +36,7 @@ import paddle.distributed
import paddle.sysconfig
import paddle.tensor
import paddle.nn
import paddle.fleet
import paddle.distributed.fleet
import paddle.framework
import paddle.optimizer
import paddle.metric
......
......@@ -13,7 +13,7 @@
# limitations under the License.
import paddle
from paddle.fleet.proto import distributed_strategy_pb2
from paddle.distributed.fleet.proto import distributed_strategy_pb2
from paddle.fluid.framework import Variable
import google.protobuf.text_format
......@@ -103,7 +103,7 @@ class DistributedStrategy(object):
Examples:
.. code-block:: python
import paddle.fleet as fleet
import paddle.distributed.fleet as fleet
strategy = fleet.DistributedStrategy()
strategy.dgc = True
strategy.recompute = True
......@@ -120,7 +120,7 @@ class DistributedStrategy(object):
Examples:
.. code-block:: python
import paddle.fleet as fleet
import paddle.distributed.fleet as fleet
strategy = fleet.DistributedStrategy()
strategy.load_from_prototxt("dist_strategy.prototxt")
"""
......@@ -141,7 +141,7 @@ class DistributedStrategy(object):
exe_strategy.num_iteration_per_drop_scope = 10
exe_strategy.num_iteration_per_run = 10
strategy = paddle.fleet.DistributedStrategy()
strategy = paddle.distributed.fleet.DistributedStrategy()
strategy.execution_strategy = exe_strategy
"""
execution_strategy = paddle.fluid.ExecutionStrategy()
......@@ -178,7 +178,7 @@ class DistributedStrategy(object):
build_strategy.fuse_all_optimizer_ops = True
build_strategy.enable_inplace = True
strategy = paddle.fleet.DistributedStrategy()
strategy = paddle.distributed.fleet.DistributedStrategy()
strategy.build_strategy = build_strategy
"""
......@@ -211,7 +211,7 @@ class DistributedStrategy(object):
Examples:
.. code-block:: python
import paddle.fleet as fleet
import paddle.distributed.fleet as fleet
role_maker = fleet.PaddleCloudRoleMaker()
fleet.init(role_maker)
......@@ -253,7 +253,7 @@ class DistributedStrategy(object):
Examples:
.. code-block:: python
import paddle.fleet as fleet
import paddle.distributed.fleet as fleet
role_maker = fleet.PaddleCloudRoleMaker()
fleet.init(role_maker)
......@@ -282,7 +282,7 @@ class DistributedStrategy(object):
Examples:
.. code-block:: python
import paddle.fleet as fleet
import paddle.distributed.fleet as fleet
strategy = fleet.DistributedStrategy()
strategy.amp = True # by default this is false
......@@ -314,7 +314,7 @@ class DistributedStrategy(object):
Examples:
.. code-block:: python
import paddle.fleet as fleet
import paddle.distributed.fleet as fleet
strategy = fleet.DistributedStrategy()
strategy.recompute = True
# suppose x and y are names of checkpoint tensors for recomputation
......@@ -432,7 +432,7 @@ class DistributedStrategy(object):
Examples:
.. code-block:: python
import paddle.fleet as fleet
import paddle.distributed.fleet as fleet
strategy = fleet.DistributedStrategy()
strategy.recompute = True
strategy.recompute_configs = {"checkpionts": ["x", "y"]}
......@@ -457,7 +457,7 @@ class DistributedStrategy(object):
Examples:
.. code-block:: python
import paddle.fleet as fleet
import paddle.distributed.fleet as fleet
strategy = fleet.DistributedStrategy()
strategy.pipeline = True
......@@ -490,7 +490,7 @@ class DistributedStrategy(object):
Examples:
.. code-block:: python
import paddle.fleet as fleet
import paddle.distributed.fleet as fleet
strategy = fleet.DistributedStrategy()
strategy.pipeline = True
strategy.pipeline_configs = {"micro_batch": 12}
......@@ -560,7 +560,7 @@ class DistributedStrategy(object):
Examples:
.. code-block:: python
import paddle.fleet as fleet
import paddle.distributed.fleet as fleet
strategy = fleet.DistributedStrategy()
strategy.gradient_merge = True
strategy.gradient_merge_configs = {"k_steps": 4, "avg": True}
......@@ -583,7 +583,7 @@ class DistributedStrategy(object):
avg (bool): whether to average the gradients of each mini-batch,
the default value is `True`
Example:
import paddle.fleet as fleet
import paddle.distributed.fleet as fleet
strategy = fleet.DistributedStrategy()
strategy.gradient_merge = True
strategy.gradient_merge_configs = {"k_steps": 4, "avg": True}
......
......@@ -34,9 +34,8 @@ class Fleet(object):
Examples:
.. code-block:: python
import paddle.fleet as fleet
import paddle.fluid.incubate.fleet.base.role_maker as role_maker
role = role_maker.PaddleCloudRoleMaker(is_collective=True)
import paddle.distributed.fleet as fleet
role = fleet.role_maker.PaddleCloudRoleMaker(is_collective=True)
fleet.init(role)
strategy = fleet.DistributedStrategy()
optimizer = paddle.optimizer.SGD(learning_rate=0.001)
......@@ -218,9 +217,8 @@ class Fleet(object):
Examples:
.. code-block:: python
import paddle.fleet as fleet
import paddle.fluid.incubate.fleet.base.role_maker as role_maker
role = role_maker.PaddleCloudRoleMaker(is_collective=True)
import paddle.distributed.fleet as fleet
role = fleet.role_maker.PaddleCloudRoleMaker(is_collective=True)
fleet.init(role)
strategy = fleet.DistributedStrategy()
optimizer = paddle.optimizer.SGD(learning_rate=0.001)
......@@ -260,8 +258,7 @@ class Fleet(object):
Examples:
import paddle
import paddle.fleet as fleet
import paddle.fluid.incubate.fleet.base.role_maker as role_maker
import paddle.distributed.fleet as fleet
fc_1 = paddle.layers.fc(input=input_x, size=hid_dim, act='tanh')
fc_2 = paddle.layers.fc(input=fc_1, size=hid_dim, act='tanh')
......@@ -269,7 +266,7 @@ class Fleet(object):
cost = paddle.layers.cross_entropy(input=prediction, label=input_y)
avg_cost = paddle.layers.mean(x=cost)
role = role_maker.PaddleCloudRoleMaker(is_collective=True)
role = fleet.role_maker.PaddleCloudRoleMaker(is_collective=True)
fleet.init(role)
strategy = fleet.DistributedStrategy()
optimizer = paddle.optimizer.SGD(learning_rate=0.001)
......
......@@ -481,7 +481,7 @@ class PaddleCloudRoleMaker(RoleMakerBase):
return "lo"
def __start_kv_server(self, http_server_d, size_d):
from paddle.fleet.utils import KVServer
from paddle.distributed.fleet.utils import KVServer
http_server = KVServer(int(self._http_ip_port[1]), size_d)
http_server.start()
wait_seconds = 5
......
......@@ -55,8 +55,8 @@ class UtilBase(object):
def set_file_system(self, fs_client):
assert isinstance(
fs_client,
FS), "fs_client must be the instance of paddle.fleet.utils.FS"
fs_client, FS
), "fs_client must be the instance of paddle.distributed.fleet.utils.FS"
self.fs_client = fs_client
def __check_comm_world(self, comm_world="worker"):
......
......@@ -14,7 +14,7 @@
import os
import paddle
from paddle.fleet.launch_utils import get_cluster, logger
from paddle.distributed.fleet.launch_utils import get_cluster, logger
def get_cloud_cluster(args_node_ips, selected_gpus, args_port=6170):
......
......@@ -66,8 +66,8 @@ from argparse import ArgumentParser, REMAINDER
import paddle
import paddle.fluid as fluid
from paddle.fleet.launch_utils import *
import paddle.fleet.cloud_utils as cloud_utils
from paddle.distributed.fleet.launch_utils import *
import paddle.distributed.fleet.cloud_utils as cloud_utils
def _print_arguments(args):
......
......@@ -43,7 +43,7 @@ def sum(input, scope=None):
# in train.py, after train or infer
res = np.array(scope.find_var(global_cnt.name).get_tensor())
print("sum array: ", paddle.fleet.sum(res))
print("sum array: ", paddle.distributed.fleet.sum(res))
"""
fleet._role_maker._barrier_worker()
if scope is None:
......@@ -82,7 +82,7 @@ def max(input, scope=None):
# in train.py, after train or infer
res = np.array(scope.find_var(global_cnt.name).get_tensor())
print("max array: ", paddle.fleet.max(res))
print("max array: ", paddle.distributed.fleet.max(res))
"""
fleet._role_maker._barrier_worker()
if scope is None:
......@@ -121,7 +121,7 @@ def min(input, scope=None):
# in train.py, after train or infer
res = np.array(scope.find_var(global_cnt.name).get_tensor())
print("min array: ", paddle.fleet.min(res))
print("min array: ", paddle.distributed.fleet.min(res))
"""
fleet._role_maker._barrier_worker()
if scope is None:
......@@ -162,7 +162,7 @@ def auc(stat_pos, stat_neg, scope=None):
# in train.py, after train or infer
pos = np.array(scope.find_var(stat_pos.name).get_tensor())
neg = np.array(scope.find_var(stat_neg.name).get_tensor())
print("auc: ", paddle.fleet.auc(pos, neg))
print("auc: ", paddle.distributed.fleet.auc(pos, neg))
"""
fleet._role_maker._barrier_worker()
if scope is None:
......@@ -240,7 +240,7 @@ def mae(abserr, total_ins_num, scope=None):
# in train.py, after train or infer
res = np.array(scope.find_var(abserr.name).get_tensor())
print("mae: ", paddle.fleet.mae(res, total_ins_num))
print("mae: ", paddle.distributed.fleet.mae(res, total_ins_num))
"""
fleet._role_maker._barrier_worker()
if scope is None:
......@@ -278,7 +278,7 @@ def rmse(sqrerr, total_ins_num, scope=None):
# in train.py, after train or infer
res = np.array(scope.find_var(sqrerr.name).get_tensor())
print("rmse: ", paddle.fleet.rmse(res, total_ins_num))
print("rmse: ", paddle.distributed.fleet.rmse(res, total_ins_num))
"""
fleet._role_maker._barrier_worker()
if scope is None:
......@@ -316,7 +316,7 @@ def mse(sqrerr, total_ins_num, scope=None):
# in train.py, after train or infer
metric = np.array(scope.find_var(sqrerr.name).get_tensor())
print("mse: ", paddle.fleet.mse(metric, total_ins_num))
print("mse: ", paddle.distributed.fleet.mse(metric, total_ins_num))
"""
fleet._role_maker._barrier_worker()
if scope is None:
......@@ -365,7 +365,7 @@ def acc(correct, total, scope=None):
# in train.py, after train or infer
correct_num = np.array(scope.find_var(correct.name).get_tensor())
total_num = np.array(scope.find_var(total.name).get_tensor())
print("accuracy: ", paddle.fleet.acc(correct_num, total_num))
print("accuracy: ", paddle.distributed.fleet.acc(correct_num, total_num))
"""
fleet._role_maker._barrier_worker()
if scope is None:
......
......@@ -305,7 +305,7 @@ class TrainEpochRange(SerializableBase):
if self._checker.ce_test:
config = None
from paddle.fleet.utils.fs import HDFSClient
from paddle.distributed.fleet.utils.fs import HDFSClient
self._hdfs = HDFSClient(self._checker.hdfs_home, config)
self._cper = CheckpointSaver(self._hdfs)
......
......@@ -79,7 +79,7 @@ class CheckpointSaver(object):
tmp_path = "{}.tmp".format(real_path)
saved_path = tmp_path
from paddle.fleet.utils.fs import LocalFS
from paddle.distributed.fleet.utils.fs import LocalFS
local_fs = LocalFS()
cache_path = None
......@@ -134,7 +134,7 @@ class CheckpointSaver(object):
assert isinstance(checkpoint_no, int)
assert checkpoint_no >= 0
from paddle.fleet.utils.fs import LocalFS
from paddle.distributed.fleet.utils.fs import LocalFS
local_fs = LocalFS()
if self._fs.need_upload_download():
cache_path = "{}/{}.{}.load_cache".format(
......
......@@ -21,7 +21,7 @@ from paddle.fluid.executor import Executor
from paddle.fluid.optimizer import SGD
from paddle.fluid.incubate.fleet.base.mode import Mode
from paddle.fleet.base.role_maker import RoleMakerBase
from paddle.distributed.fleet.base.role_maker import RoleMakerBase
from paddle.fluid.contrib.mixed_precision.decorator import OptimizerWithMixedPrecision
from . import mode
......
......@@ -1709,7 +1709,7 @@ class PyReader(DataLoaderBase):
class DatasetLoader(DataLoaderBase):
def __init__(self, dataset, places, drop_last):
assert isinstance(dataset, paddle.fleet.dataset.
assert isinstance(dataset, paddle.distributed.fleet.dataset.
DatasetBase), "dataset must be type of DatasetBase"
assert not in_dygraph_mode(
), "DatasetLoader is not supported in dygraph mode yet"
......@@ -1725,7 +1725,7 @@ class DatasetLoader(DataLoaderBase):
dataset.set_thread(thread_num)
if isinstance(dataset, paddle.fleet.dataset.
if isinstance(dataset, paddle.distributed.fleet.dataset.
InMemoryDataset) and dataset.queue_num > thread_num:
logging.warn("queue_num {} which is set in Dataset is ignored".
format(dataset.queue_num))
......
File mode changed from 100755 to 100644
......@@ -18,7 +18,7 @@ import unittest
import os
import paddle.fluid.core as core
import paddle.fluid as fluid
from paddle.fleet.base.private_helper_function import wait_server_ready
from paddle.distributed.fleet.base.private_helper_function import wait_server_ready
class TestCCommInitOp(unittest.TestCase):
......
......@@ -28,7 +28,7 @@ import numpy as np
import ctr_dataset_reader
from test_dist_fleet_base import runtime_main, FleetDistRunnerBase
from paddle.fleet.base.util_factory import fleet_util
from paddle.distributed.fleet.base.util_factory import fleet_util
# Fix seed for test
fluid.default_startup_program().random_seed = 1
......@@ -217,7 +217,7 @@ class TestDistCTR2x2(FleetDistRunnerBase):
filelist.append(train_file_path)
# config dataset
dataset = paddle.fleet.DatasetFactory().create_dataset()
dataset = paddle.distributed.fleet.DatasetFactory().create_dataset()
dataset.set_batch_size(batch_size)
dataset.set_use_var(self.feeds)
pipe_command = 'python ctr_dataset_reader.py'
......
......@@ -20,7 +20,7 @@ from paddle.fluid.incubate.fleet.collective import CollectiveOptimizer, fleet
import os
import sys
from paddle.fleet.utils.fs import LocalFS, HDFSClient
from paddle.distributed.fleet.utils.fs import LocalFS, HDFSClient
import paddle.fluid.incubate.checkpoint.auto_checkpoint as acp
from paddle.fluid.incubate.checkpoint.checkpoint_saver import PaddleModel
from paddle.fluid.framework import program_guard
......
......@@ -20,7 +20,7 @@ from paddle.fluid.incubate.fleet.collective import CollectiveOptimizer, fleet
import os
import sys
from paddle.fleet.utils.fs import LocalFS, HDFSClient
from paddle.distributed.fleet.utils.fs import LocalFS, HDFSClient
import paddle.fluid.incubate.checkpoint.auto_checkpoint as acp
from paddle.fluid.incubate.checkpoint.checkpoint_saver import PaddleModel
from paddle.fluid.framework import program_guard
......
......@@ -26,7 +26,7 @@ import paddle
import paddle.fluid as fluid
import paddle.fluid.incubate.fleet.base.role_maker as role_maker
import paddle.fleet as fleet
import paddle.distributed.fleet as fleet
class TestCommunicatorGeoEnd2End(unittest.TestCase):
......@@ -108,7 +108,7 @@ class TestCommunicatorGeoEnd2End(unittest.TestCase):
role = role_maker.PaddleCloudRoleMaker()
strategy = paddle.fleet.DistributedStrategy()
strategy = paddle.distributed.fleet.DistributedStrategy()
strategy.a_sync = True
strategy.a_sync_configs = {"k_steps": 100}
......@@ -136,7 +136,7 @@ import paddle.fluid as fluid
from paddle.fluid.communicator import Communicator
import paddle.fluid.incubate.fleet.base.role_maker as role_maker
from paddle.fluid.incubate.fleet.parameter_server.mode import DistributedMode
import paddle.fleet as fleet
import paddle.distributed.fleet as fleet
from test_communicator_geo import TestCommunicatorGeoEnd2End
......
......@@ -22,7 +22,7 @@ import paddle
import paddle.fluid as fluid
import paddle.fluid.incubate.fleet.base.role_maker as role_maker
import paddle.fleet as fleet
import paddle.distributed.fleet as fleet
class TestCommunicator(unittest.TestCase):
......@@ -51,7 +51,7 @@ class TestCommunicator(unittest.TestCase):
optimizer = fluid.optimizer.SGD(0.01)
strategy = paddle.fleet.DistributedStrategy()
strategy = paddle.distributed.fleet.DistributedStrategy()
strategy.a_sync = False
optimizer = fleet.distributed_optimizer(optimizer, strategy)
......
......@@ -38,25 +38,25 @@ class TestDataset(unittest.TestCase):
def test_dataset_create(self):
""" Testcase for dataset create. """
try:
dataset = paddle.fleet.DatasetFactory().create_dataset(
dataset = paddle.distributed.fleet.DatasetFactory().create_dataset(
"InMemoryDataset")
except:
self.assertTrue(False)
try:
dataset = paddle.fleet.DatasetFactory().create_dataset(
dataset = paddle.distributed.fleet.DatasetFactory().create_dataset(
"QueueDataset")
except:
self.assertTrue(False)
try:
dataset = paddle.fleet.DatasetFactory().create_dataset(
dataset = paddle.distributed.fleet.DatasetFactory().create_dataset(
"FileInstantDataset")
except:
self.assertTrue(False)
try:
dataset = paddle.fleet.DatasetFactory().create_dataset(
dataset = paddle.distributed.fleet.DatasetFactory().create_dataset(
"MyOwnDataset")
self.assertTrue(False)
except:
......@@ -95,7 +95,7 @@ class TestDataset(unittest.TestCase):
name=slot, shape=[1], dtype="int64", lod_level=1)
slots_vars.append(var)
dataset = paddle.fleet.DatasetFactory().create_dataset(
dataset = paddle.distributed.fleet.DatasetFactory().create_dataset(
"InMemoryDataset")
dataset.set_batch_size(32)
dataset.set_thread(3)
......@@ -176,7 +176,7 @@ class TestDataset(unittest.TestCase):
name=slot, shape=[1], dtype="int64", lod_level=1)
slots_vars.append(var)
dataset = paddle.fleet.DatasetFactory().create_dataset(
dataset = paddle.distributed.fleet.DatasetFactory().create_dataset(
"InMemoryDataset")
dataset.set_batch_size(32)
dataset.set_thread(3)
......@@ -228,7 +228,7 @@ class TestDataset(unittest.TestCase):
name=slot, shape=[1], dtype="int64", lod_level=1)
slots_vars.append(var)
dataset = paddle.fleet.DatasetFactory().create_dataset(
dataset = paddle.distributed.fleet.DatasetFactory().create_dataset(
"InMemoryDataset")
dataset.set_batch_size(32)
dataset.set_thread(3)
......@@ -300,7 +300,7 @@ class TestDataset(unittest.TestCase):
name=slot, shape=[1], dtype="float32", lod_level=1)
slots_vars.append(var)
dataset = paddle.fleet.DatasetFactory().create_dataset(
dataset = paddle.distributed.fleet.DatasetFactory().create_dataset(
"InMemoryDataset")
dataset.set_batch_size(32)
dataset.set_thread(1)
......@@ -367,7 +367,7 @@ class TestDataset(unittest.TestCase):
name="slot4", shape=[1], dtype="float32", lod_level=0)
slots_vars = [var1, var2, var3, var4]
dataset = paddle.fleet.DatasetFactory().create_dataset(
dataset = paddle.distributed.fleet.DatasetFactory().create_dataset(
"InMemoryDataset")
dataset.set_batch_size(32)
dataset.set_thread(1)
......@@ -423,7 +423,7 @@ class TestDataset(unittest.TestCase):
name=slot, shape=[1], dtype="float32", lod_level=1)
slots_vars.append(var)
dataset = paddle.fleet.DatasetFactory().create_dataset(
dataset = paddle.distributed.fleet.DatasetFactory().create_dataset(
"InMemoryDataset")
dataset.set_batch_size(32)
dataset.set_thread(3)
......@@ -517,7 +517,8 @@ class TestDataset(unittest.TestCase):
name=slot, shape=[1], dtype="int64", lod_level=1)
slots_vars.append(var)
dataset = paddle.fleet.DatasetFactory().create_dataset("QueueDataset")
dataset = paddle.distributed.fleet.DatasetFactory().create_dataset(
"QueueDataset")
dataset.set_batch_size(32)
dataset.set_thread(3)
dataset.set_filelist(
......@@ -542,7 +543,8 @@ class TestDataset(unittest.TestCase):
except Exception as e:
self.assertTrue(False)
dataset2 = paddle.fleet.DatasetFactory().create_dataset("QueueDataset")
dataset2 = paddle.distributed.fleet.DatasetFactory().create_dataset(
"QueueDataset")
dataset2.set_use_var(slots_vars)
dataset2.set_batch_size(32)
dataset2.set_thread(3)
......@@ -583,7 +585,8 @@ class TestDataset(unittest.TestCase):
name=slot, shape=[1], dtype="float32", lod_level=1)
slots_vars.append(var)
dataset = paddle.fleet.DatasetFactory().create_dataset("QueueDataset")
dataset = paddle.distributed.fleet.DatasetFactory().create_dataset(
"QueueDataset")
dataset.set_batch_size(32)
dataset.set_thread(3)
dataset.set_filelist(
......@@ -638,7 +641,7 @@ class TestDataset(unittest.TestCase):
name=slot, shape=[None, 1], dtype="int64", lod_level=1)
slots_vars.append(var)
dataset = paddle.fleet.DatasetFactory().create_dataset(
dataset = paddle.distributed.fleet.DatasetFactory().create_dataset(
"InMemoryDataset")
dataset.set_input_type(1)
dataset.set_batch_size(1)
......@@ -718,7 +721,8 @@ class TestDatasetWithFetchHandler(unittest.TestCase):
inputs(list): inputs of get_dataset
files(list): files of get_dataset
"""
dataset = paddle.fleet.DatasetFactory().create_dataset("QueueDataset")
dataset = paddle.distributed.fleet.DatasetFactory().create_dataset(
"QueueDataset")
dataset.set_batch_size(32)
dataset.set_thread(3)
dataset.set_filelist(files)
......@@ -875,7 +879,7 @@ class TestDataset2(unittest.TestCase):
except ImportError as e:
print("warning: no mpi4py")
exe.run(startup_program)
dataset = paddle.fleet.DatasetFactory().create_dataset(
dataset = paddle.distributed.fleet.DatasetFactory().create_dataset(
"InMemoryDataset")
dataset.set_batch_size(32)
dataset.set_thread(3)
......@@ -945,7 +949,7 @@ class TestDataset2(unittest.TestCase):
except ImportError as e:
print("warning: no mpi4py")
exe.run(startup_program)
dataset = paddle.fleet.DatasetFactory().create_dataset(
dataset = paddle.distributed.fleet.DatasetFactory().create_dataset(
"InMemoryDataset")
dataset.set_batch_size(32)
dataset.set_thread(3)
......@@ -962,12 +966,12 @@ class TestDataset2(unittest.TestCase):
print("warning: catch expected error")
fleet._opt_info = None
fleet._fleet_ptr = None
dataset = paddle.fleet.DatasetFactory().create_dataset(
dataset = paddle.distributed.fleet.DatasetFactory().create_dataset(
"InMemoryDataset")
dataset.set_rank_offset("")
dataset.set_pv_batch_size(1)
dataset.set_hdfs_config("", "")
d = paddle.fleet.DatasetBase()
d = paddle.distributed.fleet.DatasetBase()
try:
dataset.set_feed_type("MultiSlotInMemoryDataFeed")
except:
......@@ -1000,7 +1004,7 @@ class TestDataset2(unittest.TestCase):
dataset.get_pv_data_size()
dataset.get_memory_data_size()
dataset.get_shuffle_data_size()
dataset = paddle.fleet.DatasetFactory().create_dataset(
dataset = paddle.distributed.fleet.DatasetFactory().create_dataset(
"QueueDataset")
try:
dataset.local_shuffle()
......@@ -1010,7 +1014,7 @@ class TestDataset2(unittest.TestCase):
dataset.global_shuffle()
except:
print("warning: catch expected error")
dataset = paddle.fleet.FileInstantDataset()
dataset = paddle.distributed.fleet.FileInstantDataset()
try:
dataset.local_shuffle()
except:
......
......@@ -97,7 +97,7 @@ class DatasetLoaderTestBase(unittest.TestCase):
def check_batch_number(self, place, randomize_batch_num=False):
main_prog, startup_prog, feeds = self.build_network()
dataset = paddle.fleet.DatasetFactory().create_dataset(
dataset = paddle.distributed.fleet.DatasetFactory().create_dataset(
self.dataset_name)
dataset.set_batch_size(BATCH_SIZE)
......
......@@ -33,7 +33,7 @@ class TestFleetGradientMergeMetaOptimizer(unittest.TestCase):
def test_a_sync_optimizer_trainer(self):
os.environ["TRAINING_ROLE"] = "TRAINER"
import paddle.fleet as fleet
import paddle.distributed.fleet as fleet
main_program = paddle.fluid.Program()
startup_program = paddle.fluid.Program()
......@@ -53,7 +53,7 @@ class TestFleetGradientMergeMetaOptimizer(unittest.TestCase):
input=prediction, label=input_y)
avg_cost = paddle.fluid.layers.mean(x=cost)
strategy = paddle.fleet.DistributedStrategy()
strategy = paddle.distributed.fleet.DistributedStrategy()
strategy.a_sync = True
optimizer = paddle.optimizer.SGD(learning_rate=0.01)
optimizer = fleet.distributed_optimizer(optimizer, strategy=strategy)
......@@ -78,7 +78,7 @@ class TestFleetGradientMergeMetaOptimizer(unittest.TestCase):
def test_a_sync_optimizer_pserver(self):
os.environ["TRAINING_ROLE"] = "PSERVER"
import paddle.fleet as fleet
import paddle.distributed.fleet as fleet
main_program = paddle.fluid.Program()
startup_program = paddle.fluid.Program()
......@@ -98,7 +98,7 @@ class TestFleetGradientMergeMetaOptimizer(unittest.TestCase):
input=prediction, label=input_y)
avg_cost = paddle.fluid.layers.mean(x=cost)
strategy = paddle.fleet.DistributedStrategy()
strategy = paddle.distributed.fleet.DistributedStrategy()
strategy.a_sync = True
optimizer = paddle.optimizer.SGD(learning_rate=0.01)
optimizer = fleet.distributed_optimizer(optimizer, strategy=strategy)
......
......@@ -32,7 +32,7 @@ class TestFleetGradientMergeMetaOptimizer(unittest.TestCase):
def test_a_sync_optimizer_trainer(self):
os.environ["TRAINING_ROLE"] = "TRAINER"
import paddle.fleet as fleet
import paddle.distributed.fleet as fleet
main_program = paddle.fluid.Program()
startup_program = paddle.fluid.Program()
......@@ -52,7 +52,7 @@ class TestFleetGradientMergeMetaOptimizer(unittest.TestCase):
input=prediction, label=input_y)
avg_cost = paddle.fluid.layers.mean(x=cost)
strategy = paddle.fleet.DistributedStrategy()
strategy = paddle.distributed.fleet.DistributedStrategy()
strategy.a_sync = True
strategy.a_sync_configs = {"k_steps": 100}
optimizer = paddle.optimizer.SGD(learning_rate=0.01)
......@@ -75,7 +75,7 @@ class TestFleetGradientMergeMetaOptimizer(unittest.TestCase):
def test_a_sync_optimizer_pserver(self):
os.environ["TRAINING_ROLE"] = "PSERVER"
import paddle.fleet as fleet
import paddle.distributed.fleet as fleet
main_program = paddle.fluid.Program()
startup_program = paddle.fluid.Program()
......@@ -95,7 +95,7 @@ class TestFleetGradientMergeMetaOptimizer(unittest.TestCase):
input=prediction, label=input_y)
avg_cost = paddle.fluid.layers.mean(x=cost)
strategy = paddle.fleet.DistributedStrategy()
strategy = paddle.distributed.fleet.DistributedStrategy()
strategy.a_sync = True
strategy.a_sync_configs = {"k_steps": 100}
optimizer = paddle.optimizer.SGD(learning_rate=0.01)
......
......@@ -15,7 +15,7 @@
import unittest
import paddle
import os
import paddle.fleet as fleet
import paddle.distributed.fleet as fleet
import paddle.fluid.incubate.fleet.base.role_maker as role_maker
import time
......@@ -45,7 +45,7 @@ class TestFleetGradientMergeMetaOptimizer(unittest.TestCase):
input=prediction, label=input_y)
avg_cost = paddle.fluid.layers.mean(x=cost)
strategy = paddle.fleet.DistributedStrategy()
strategy = paddle.distributed.fleet.DistributedStrategy()
strategy.a_sync = False
optimizer = paddle.optimizer.SGD(learning_rate=0.01)
optimizer = fleet.distributed_optimizer(optimizer, strategy=strategy)
......
......@@ -32,8 +32,8 @@ import tempfile
import unittest
import paddle.fluid as fluid
import paddle.fleet.base.role_maker as role_maker
from paddle.fleet.base.util_factory import fleet_util
import paddle.distributed.fleet.base.role_maker as role_maker
from paddle.distributed.fleet.base.util_factory import fleet_util
from paddle.fluid.incubate.fleet.parameter_server.distribute_transpiler import fleet
from paddle.fluid.incubate.fleet.parameter_server.distribute_transpiler.distributed_strategy import StrategyFactory
......
......@@ -22,7 +22,7 @@ import shutil
import paddle
import paddle.fluid as fluid
import paddle.fluid.incubate.fleet.base.role_maker as role_maker
import paddle.fleet as fleet
import paddle.distributed.fleet as fleet
# For Net
base_lr = 0.2
......@@ -163,7 +163,7 @@ class TestPSPassWithBow(unittest.TestCase):
fleet.init(role)
loss, acc, _ = self.net()
strategy = paddle.fleet.DistributedStrategy()
strategy = paddle.distributed.fleet.DistributedStrategy()
strategy.a_sync = True
optimizer = paddle.optimizer.SGD(learning_rate=0.01)
optimizer = fleet.distributed_optimizer(optimizer, strategy=strategy)
......
......@@ -23,7 +23,7 @@ class TestFleetAMPOptimizer(unittest.TestCase):
os.environ["PADDLE_TRAINER_ENDPOINTS"] = "127.0.0.1:36001"
def test_amp_optimizer(self):
import paddle.fleet as fleet
import paddle.distributed.fleet as fleet
import paddle.fluid.incubate.fleet.base.role_maker as role_maker
role = role_maker.PaddleCloudRoleMaker(is_collective=True)
fleet.init(role)
......@@ -38,7 +38,7 @@ class TestFleetAMPOptimizer(unittest.TestCase):
input=prediction, label=input_y)
avg_cost = paddle.fluid.layers.mean(x=cost)
strategy = paddle.fleet.DistributedStrategy()
strategy = paddle.distributed.fleet.DistributedStrategy()
strategy.amp = True
strategy.amp_configs = {
"init_loss_scaling": 32768,
......
......@@ -26,13 +26,13 @@ class TestFleetBase(unittest.TestCase):
"127.0.0.1:36001,127.0.0.2:36001"
def test_init(self):
import paddle.fleet as fleet
import paddle.distributed.fleet as fleet
import paddle.fluid.incubate.fleet.base.role_maker as role_maker
role = role_maker.PaddleCloudRoleMaker(is_collective=True)
fleet.init(role)
def test_is_first_worker(self):
import paddle.fleet as fleet
import paddle.distributed.fleet as fleet
import paddle.fluid.incubate.fleet.base.role_maker as role_maker
role = role_maker.PaddleCloudRoleMaker(is_collective=True)
fleet.init(role)
......@@ -40,21 +40,21 @@ class TestFleetBase(unittest.TestCase):
print("test fleet first worker done.")
def test_worker_index(self):
import paddle.fleet as fleet
import paddle.distributed.fleet as fleet
import paddle.fluid.incubate.fleet.base.role_maker as role_maker
role = role_maker.PaddleCloudRoleMaker(is_collective=True)
fleet.init(role)
print(fleet.worker_index())
def test_worker_num(self):
import paddle.fleet as fleet
import paddle.distributed.fleet as fleet
import paddle.fluid.incubate.fleet.base.role_maker as role_maker
role = role_maker.PaddleCloudRoleMaker(is_collective=True)
fleet.init(role)
print(fleet.worker_num())
def test_is_worker(self):
import paddle.fleet as fleet
import paddle.distributed.fleet as fleet
import paddle.fluid.incubate.fleet.base.role_maker as role_maker
role = role_maker.PaddleCloudRoleMaker(is_collective=True)
fleet.init(role)
......@@ -62,14 +62,14 @@ class TestFleetBase(unittest.TestCase):
print("test fleet is worker")
def test_worker_endpoints(self):
import paddle.fleet as fleet
import paddle.distributed.fleet as fleet
import paddle.fluid.incubate.fleet.base.role_maker as role_maker
role = role_maker.PaddleCloudRoleMaker(is_collective=True)
fleet.init(role)
print(fleet.worker_endpoints(to_string=True))
def test_server_num(self):
import paddle.fleet as fleet
import paddle.distributed.fleet as fleet
import paddle.fluid.incubate.fleet.base.role_maker as role_maker
role = role_maker.PaddleCloudRoleMaker(is_collective=True)
fleet.init(role)
......@@ -77,7 +77,7 @@ class TestFleetBase(unittest.TestCase):
print("fleet server num: {}".format(fleet.server_num()))
def test_server_index(self):
import paddle.fleet as fleet
import paddle.distributed.fleet as fleet
import paddle.fluid.incubate.fleet.base.role_maker as role_maker
role = role_maker.PaddleCloudRoleMaker(is_collective=True)
fleet.init(role)
......@@ -85,7 +85,7 @@ class TestFleetBase(unittest.TestCase):
print("fleet server index: {}".format(fleet.server_index()))
def test_server_endpoints(self):
import paddle.fleet as fleet
import paddle.distributed.fleet as fleet
import paddle.fluid.incubate.fleet.base.role_maker as role_maker
role = role_maker.PaddleCloudRoleMaker(is_collective=True)
fleet.init(role)
......@@ -94,7 +94,7 @@ class TestFleetBase(unittest.TestCase):
fleet.server_endpoints(to_string=True)))
def test_is_server(self):
import paddle.fleet as fleet
import paddle.distributed.fleet as fleet
import paddle.fluid.incubate.fleet.base.role_maker as role_maker
role = role_maker.PaddleCloudRoleMaker(is_collective=True)
fleet.init(role)
......@@ -102,14 +102,14 @@ class TestFleetBase(unittest.TestCase):
print("test fleet is server")
def test_util(self):
import paddle.fleet as fleet
import paddle.distributed.fleet as fleet
import paddle.fluid.incubate.fleet.base.role_maker as role_maker
role = role_maker.PaddleCloudRoleMaker(is_collective=True)
fleet.init(role)
self.assertEqual(fleet.util, None)
def test_barrier_worker(self):
import paddle.fleet as fleet
import paddle.distributed.fleet as fleet
import paddle.fluid.incubate.fleet.base.role_maker as role_maker
role = role_maker.PaddleCloudRoleMaker(is_collective=True)
fleet.init(role)
......@@ -117,7 +117,7 @@ class TestFleetBase(unittest.TestCase):
fleet.barrier_worker()
def test_init_worker(self):
import paddle.fleet as fleet
import paddle.distributed.fleet as fleet
import paddle.fluid.incubate.fleet.base.role_maker as role_maker
role = role_maker.PaddleCloudRoleMaker(is_collective=True)
fleet.init(role)
......@@ -125,7 +125,7 @@ class TestFleetBase(unittest.TestCase):
fleet.init_worker()
def test_run_server(self):
import paddle.fleet as fleet
import paddle.distributed.fleet as fleet
import paddle.fluid.incubate.fleet.base.role_maker as role_maker
role = role_maker.PaddleCloudRoleMaker(is_collective=True)
fleet.init(role)
......@@ -133,7 +133,7 @@ class TestFleetBase(unittest.TestCase):
fleet.run_worker()
def test_stop_worker(self):
import paddle.fleet as fleet
import paddle.distributed.fleet as fleet
import paddle.fluid.incubate.fleet.base.role_maker as role_maker
role = role_maker.PaddleCloudRoleMaker(is_collective=True)
fleet.init(role)
......@@ -141,7 +141,7 @@ class TestFleetBase(unittest.TestCase):
fleet.stop_worker()
def test_distributed_optimizer(self):
import paddle.fleet as fleet
import paddle.distributed.fleet as fleet
import paddle.fluid.incubate.fleet.base.role_maker as role_maker
role = role_maker.PaddleCloudRoleMaker(is_collective=True)
fleet.init(role)
......@@ -151,7 +151,7 @@ class TestFleetBase(unittest.TestCase):
def test_minimize(self):
import paddle
import paddle.fleet as fleet
import paddle.distributed.fleet as fleet
import paddle.fluid.incubate.fleet.base.role_maker as role_maker
input_x = paddle.fluid.layers.data(
......
......@@ -16,7 +16,7 @@ import unittest
import paddle
from paddle import fluid
import os
import paddle.fleet as fleet
import paddle.distributed.fleet as fleet
import paddle.fluid.incubate.fleet.base.role_maker as role_maker
......@@ -47,7 +47,7 @@ class TestFleetDGCOptimizer(unittest.TestCase):
input=prediction, label=input_y)
avg_cost = paddle.fluid.layers.mean(x=cost)
strategy = paddle.fleet.DistributedStrategy()
strategy = paddle.distributed.fleet.DistributedStrategy()
strategy.dgc = True
strategy.dgc_configs = {
"rampup_begin_step": 128,
......
......@@ -19,7 +19,7 @@ import os
class TestStrategyConfig(unittest.TestCase):
def test_amp(self):
strategy = paddle.fleet.DistributedStrategy()
strategy = paddle.distributed.fleet.DistributedStrategy()
strategy.amp = True
self.assertEqual(strategy.amp, True)
strategy.amp = False
......@@ -28,7 +28,7 @@ class TestStrategyConfig(unittest.TestCase):
self.assertEqual(strategy.amp, False)
def test_amp_configs(self):
strategy = paddle.fleet.DistributedStrategy()
strategy = paddle.distributed.fleet.DistributedStrategy()
configs = {
"init_loss_scaling": 32768,
"decr_every_n_nan_or_inf": 2,
......@@ -41,7 +41,7 @@ class TestStrategyConfig(unittest.TestCase):
self.assertEqual(strategy.amp_configs["init_loss_scaling"], 32768)
def test_recompute(self):
strategy = paddle.fleet.DistributedStrategy()
strategy = paddle.distributed.fleet.DistributedStrategy()
strategy.recompute = True
self.assertEqual(strategy.recompute, True)
strategy.recompute = False
......@@ -50,13 +50,13 @@ class TestStrategyConfig(unittest.TestCase):
self.assertEqual(strategy.recompute, False)
def test_recompute_configs(self):
strategy = paddle.fleet.DistributedStrategy()
strategy = paddle.distributed.fleet.DistributedStrategy()
configs = {"checkpoints": ["x", "y"]}
strategy.recompute_configs = configs
self.assertEqual(len(strategy.recompute_configs["checkpoints"]), 2)
def test_pipeline(self):
strategy = paddle.fleet.DistributedStrategy()
strategy = paddle.distributed.fleet.DistributedStrategy()
strategy.pipeline = True
self.assertEqual(strategy.pipeline, True)
strategy.pipeline = False
......@@ -65,13 +65,13 @@ class TestStrategyConfig(unittest.TestCase):
self.assertEqual(strategy.pipeline, False)
def test_pipeline_configs(self):
strategy = paddle.fleet.DistributedStrategy()
strategy = paddle.distributed.fleet.DistributedStrategy()
configs = {"micro_batch": 4}
strategy.pipeline_configs = configs
self.assertEqual(strategy.pipeline_configs["micro_batch"], 4)
def test_localsgd(self):
strategy = paddle.fleet.DistributedStrategy()
strategy = paddle.distributed.fleet.DistributedStrategy()
strategy.localsgd = True
self.assertEqual(strategy.localsgd, True)
strategy.localsgd = False
......@@ -80,13 +80,13 @@ class TestStrategyConfig(unittest.TestCase):
self.assertEqual(strategy.localsgd, False)
def test_localsgd_configs(self):
strategy = paddle.fleet.DistributedStrategy()
strategy = paddle.distributed.fleet.DistributedStrategy()
configs = {"k_steps": 4}
strategy.localsgd_configs = configs
self.assertEqual(strategy.localsgd_configs["k_steps"], 4)
def test_dgc(self):
strategy = paddle.fleet.DistributedStrategy()
strategy = paddle.distributed.fleet.DistributedStrategy()
strategy.dgc = True
self.assertEqual(strategy.dgc, True)
strategy.dgc = False
......@@ -95,7 +95,7 @@ class TestStrategyConfig(unittest.TestCase):
self.assertEqual(strategy.dgc, False)
def test_sync_nccl_allreduce(self):
strategy = paddle.fleet.DistributedStrategy()
strategy = paddle.distributed.fleet.DistributedStrategy()
strategy.sync_nccl_allreduce = True
self.assertEqual(strategy.sync_nccl_allreduce, True)
strategy.sync_nccl_allreduce = False
......@@ -104,14 +104,14 @@ class TestStrategyConfig(unittest.TestCase):
self.assertEqual(strategy.sync_nccl_allreduce, False)
def test_nccl_comm_num(self):
strategy = paddle.fleet.DistributedStrategy()
strategy = paddle.distributed.fleet.DistributedStrategy()
strategy.nccl_comm_num = 1
self.assertEqual(strategy.nccl_comm_num, 1)
strategy.nccl_comm_num = "2"
self.assertEqual(strategy.nccl_comm_num, 1)
def test_use_hierarchical_allreduce(self):
strategy = paddle.fleet.DistributedStrategy()
strategy = paddle.distributed.fleet.DistributedStrategy()
strategy.use_hierarchical_allreduce = True
self.assertEqual(strategy.use_hierarchical_allreduce, True)
strategy.use_hierarchical_allreduce = False
......@@ -120,14 +120,14 @@ class TestStrategyConfig(unittest.TestCase):
self.assertEqual(strategy.use_hierarchical_allreduce, False)
def test_hierarchical_allreduce_inter_nranks(self):
strategy = paddle.fleet.DistributedStrategy()
strategy = paddle.distributed.fleet.DistributedStrategy()
strategy.hierarchical_allreduce_inter_nranks = 8
self.assertEqual(strategy.hierarchical_allreduce_inter_nranks, 8)
strategy.hierarchical_allreduce_inter_nranks = "4"
self.assertEqual(strategy.hierarchical_allreduce_inter_nranks, 8)
def test_sync_batch_norm(self):
strategy = paddle.fleet.DistributedStrategy()
strategy = paddle.distributed.fleet.DistributedStrategy()
strategy.sync_batch_norm = True
self.assertEqual(strategy.sync_batch_norm, True)
strategy.sync_batch_norm = False
......@@ -136,7 +136,7 @@ class TestStrategyConfig(unittest.TestCase):
self.assertEqual(strategy.sync_batch_norm, False)
def test_fuse_all_reduce_ops(self):
strategy = paddle.fleet.DistributedStrategy()
strategy = paddle.distributed.fleet.DistributedStrategy()
strategy.fuse_all_reduce_ops = True
self.assertEqual(strategy.fuse_all_reduce_ops, True)
strategy.fuse_all_reduce_ops = False
......@@ -145,21 +145,21 @@ class TestStrategyConfig(unittest.TestCase):
self.assertEqual(strategy.fuse_all_reduce_ops, False)
def test_fuse_grad_size_in_MB(self):
strategy = paddle.fleet.DistributedStrategy()
strategy = paddle.distributed.fleet.DistributedStrategy()
strategy.fuse_grad_size_in_MB = 50
self.assertEqual(strategy.fuse_grad_size_in_MB, 50)
strategy.fuse_grad_size_in_MB = "40"
self.assertEqual(strategy.fuse_grad_size_in_MB, 50)
def test_fuse_grad_size_in_TFLOPS(self):
strategy = paddle.fleet.DistributedStrategy()
strategy = paddle.distributed.fleet.DistributedStrategy()
strategy._fuse_grad_size_in_TFLOPS = 0.1
self.assertGreater(strategy._fuse_grad_size_in_TFLOPS, 0.09)
strategy._fuse_grad_size_in_TFLOPS = "0.3"
self.assertGreater(strategy._fuse_grad_size_in_TFLOPS, 0.09)
def test_gradient_merge(self):
strategy = paddle.fleet.DistributedStrategy()
strategy = paddle.distributed.fleet.DistributedStrategy()
strategy.gradient_merge = True
self.assertEqual(strategy.gradient_merge, True)
strategy.gradient_merge = False
......@@ -168,13 +168,13 @@ class TestStrategyConfig(unittest.TestCase):
self.assertEqual(strategy.gradient_merge, False)
def test_gradient_merge_configs(self):
strategy = paddle.fleet.DistributedStrategy()
strategy = paddle.distributed.fleet.DistributedStrategy()
configs = {"k_steps": 4}
strategy.gradient_merge_configs = configs
self.assertEqual(strategy.gradient_merge_configs["k_steps"], 4)
def test_lars(self):
strategy = paddle.fleet.DistributedStrategy()
strategy = paddle.distributed.fleet.DistributedStrategy()
strategy.lars = True
self.assertEqual(strategy.lars, True)
strategy.lars = False
......@@ -183,7 +183,7 @@ class TestStrategyConfig(unittest.TestCase):
self.assertEqual(strategy.lars, False)
def test_lamb(self):
strategy = paddle.fleet.DistributedStrategy()
strategy = paddle.distributed.fleet.DistributedStrategy()
strategy.lamb = True
self.assertEqual(strategy.lamb, True)
strategy.lamb = False
......@@ -192,7 +192,7 @@ class TestStrategyConfig(unittest.TestCase):
self.assertEqual(strategy.lamb, False)
def test_a_sync(self):
strategy = paddle.fleet.DistributedStrategy()
strategy = paddle.distributed.fleet.DistributedStrategy()
strategy.a_sync = True
self.assertEqual(strategy.a_sync, True)
strategy.a_sync = False
......@@ -202,13 +202,13 @@ class TestStrategyConfig(unittest.TestCase):
strategy.a_sync = "True"
def test_a_sync_configs(self):
strategy = paddle.fleet.DistributedStrategy()
strategy = paddle.distributed.fleet.DistributedStrategy()
configs = {"k_steps": 1000}
strategy.a_sync_configs = configs
self.assertEqual(strategy.a_sync_configs["k_steps"], 1000)
def test_elastic(self):
strategy = paddle.fleet.DistributedStrategy()
strategy = paddle.distributed.fleet.DistributedStrategy()
strategy.elastic = True
self.assertEqual(strategy.elastic, True)
strategy.elastic = False
......@@ -217,7 +217,7 @@ class TestStrategyConfig(unittest.TestCase):
self.assertEqual(strategy.elastic, False)
def test_auto(self):
strategy = paddle.fleet.DistributedStrategy()
strategy = paddle.distributed.fleet.DistributedStrategy()
strategy.auto = True
self.assertEqual(strategy.auto, True)
strategy.auto = False
......@@ -226,7 +226,7 @@ class TestStrategyConfig(unittest.TestCase):
self.assertEqual(strategy.auto, False)
def test_strategy_prototxt(self):
strategy = paddle.fleet.DistributedStrategy()
strategy = paddle.distributed.fleet.DistributedStrategy()
strategy.a_sync = True
strategy.localsgd = True
strategy.dgc = True
......@@ -255,7 +255,7 @@ class TestStrategyConfig(unittest.TestCase):
exe_strategy.num_iteration_per_run = 10
strategy.execution_strategy = exe_strategy
strategy.save_to_prototxt("dist_strategy.prototxt")
strategy2 = paddle.fleet.DistributedStrategy()
strategy2 = paddle.distributed.fleet.DistributedStrategy()
strategy2.load_from_prototxt("dist_strategy.prototxt")
self.assertEqual(strategy.dgc, strategy2.dgc)
......@@ -277,7 +277,7 @@ class TestStrategyConfig(unittest.TestCase):
build_strategy.enable_backward_optimizer_op_deps = True
build_strategy.trainers_endpoints = ["1", "2"]
strategy = paddle.fleet.DistributedStrategy()
strategy = paddle.distributed.fleet.DistributedStrategy()
strategy.build_strategy = build_strategy
def test_execution_strategy(self):
......@@ -286,7 +286,7 @@ class TestStrategyConfig(unittest.TestCase):
exe_strategy.num_iteration_per_drop_scope = 10
exe_strategy.num_iteration_per_run = 10
strategy = paddle.fleet.DistributedStrategy()
strategy = paddle.distributed.fleet.DistributedStrategy()
strategy.execution_strategy = exe_strategy
......
......@@ -15,7 +15,7 @@
import unittest
import paddle
import os
import paddle.fleet as fleet
import paddle.distributed.fleet as fleet
import paddle.fluid.incubate.fleet.base.role_maker as role_maker
......@@ -41,7 +41,7 @@ class TestFleetGradientMergeMetaOptimizer(unittest.TestCase):
input=prediction, label=input_y)
avg_cost = paddle.fluid.layers.mean(x=cost)
strategy = paddle.fleet.DistributedStrategy()
strategy = paddle.distributed.fleet.DistributedStrategy()
strategy.gradient_merge = True
strategy.gradient_merge_configs = {"k_steps": 2, "avg": True}
optimizer = paddle.optimizer.SGD(learning_rate=0.01)
......
......@@ -39,7 +39,7 @@ class TestFleetGraphExecutionMetaOptimizer(unittest.TestCase):
}
def node_func():
import paddle.fleet as fleet
import paddle.distributed.fleet as fleet
import paddle.fluid.incubate.fleet.base.role_maker as role_maker
role = role_maker.PaddleCloudRoleMaker(is_collective=True)
fleet.init(role)
......@@ -57,7 +57,7 @@ class TestFleetGraphExecutionMetaOptimizer(unittest.TestCase):
input=prediction, label=input_y)
avg_cost = paddle.fluid.layers.mean(x=cost)
strategy = paddle.fleet.DistributedStrategy()
strategy = paddle.distributed.fleet.DistributedStrategy()
optimizer = paddle.optimizer.SGD(learning_rate=0.01)
optimizer = fleet.distributed_optimizer(
optimizer, strategy=strategy)
......@@ -90,7 +90,7 @@ class TestFleetGraphExecutionMetaOptimizer(unittest.TestCase):
}
def node_func():
import paddle.fleet as fleet
import paddle.distributed.fleet as fleet
import paddle.fluid.incubate.fleet.base.role_maker as role_maker
role = role_maker.PaddleCloudRoleMaker(is_collective=True)
fleet.init(role)
......@@ -108,7 +108,7 @@ class TestFleetGraphExecutionMetaOptimizer(unittest.TestCase):
input=prediction, label=input_y)
avg_cost = paddle.fluid.layers.mean(x=cost)
strategy = paddle.fleet.DistributedStrategy()
strategy = paddle.distributed.fleet.DistributedStrategy()
strategy.nccl_comm_num = 2
strategy.sync_nccl_allreduce = True
optimizer = paddle.optimizer.SGD(learning_rate=0.01)
......
......@@ -39,7 +39,7 @@ class TestFleetGraphExecutionMetaOptimizer(unittest.TestCase):
}
def node_func():
import paddle.fleet as fleet
import paddle.distributed.fleet as fleet
import paddle.fluid.incubate.fleet.base.role_maker as role_maker
role = role_maker.PaddleCloudRoleMaker(is_collective=True)
fleet.init(role)
......@@ -57,7 +57,7 @@ class TestFleetGraphExecutionMetaOptimizer(unittest.TestCase):
input=prediction, label=input_y)
avg_cost = paddle.fluid.layers.mean(x=cost)
strategy = paddle.fleet.DistributedStrategy()
strategy = paddle.distributed.fleet.DistributedStrategy()
strategy.nccl_comm_num = 2
strategy.sync_nccl_allreduce = True
optimizer = paddle.optimizer.SGD(learning_rate=0.01)
......
......@@ -16,7 +16,7 @@ import unittest
import paddle
from paddle import fluid
import os
import paddle.fleet as fleet
import paddle.distributed.fleet as fleet
import paddle.fluid.incubate.fleet.base.role_maker as role_maker
......@@ -47,7 +47,7 @@ class TestFleetLambMetaOptimizer(unittest.TestCase):
input=prediction, label=input_y)
avg_cost = paddle.fluid.layers.mean(x=cost)
strategy = paddle.fleet.DistributedStrategy()
strategy = paddle.distributed.fleet.DistributedStrategy()
strategy.lamb = True
strategy.lamb_configs = {
'lamb_weight_decay': 0.01,
......
......@@ -16,7 +16,7 @@ import unittest
import paddle
from paddle import fluid
import os
import paddle.fleet as fleet
import paddle.distributed.fleet as fleet
import paddle.fluid.incubate.fleet.base.role_maker as role_maker
......@@ -47,7 +47,7 @@ class TestFleetLarsMetaOptimizer(unittest.TestCase):
input=prediction, label=input_y)
avg_cost = paddle.fluid.layers.mean(x=cost)
strategy = paddle.fleet.DistributedStrategy()
strategy = paddle.distributed.fleet.DistributedStrategy()
strategy.lars = True
strategy.lars_configs = {
"lars_coeff": 0.001,
......
......@@ -16,7 +16,7 @@ import unittest
import paddle
import os
import paddle.fleet as fleet
import paddle.distributed.fleet as fleet
import paddle.fluid.incubate.fleet.base.role_maker as role_maker
......@@ -39,7 +39,7 @@ class TestFleetLocalSGDMetaOptimizer(unittest.TestCase):
input=prediction, label=input_y)
avg_cost = paddle.fluid.layers.mean(x=cost)
strategy = paddle.fleet.DistributedStrategy()
strategy = paddle.distributed.fleet.DistributedStrategy()
strategy.localsgd = True
strategy.auto = True
config = strategy.localsgd_configs
......
......@@ -19,7 +19,7 @@ import paddle
import paddle.fluid as fluid
import os
import unittest
import paddle.fleet.metrics.metric as metric
import paddle.distributed.fleet.metrics.metric as metric
from paddle.fluid.incubate.fleet.parameter_server.distribute_transpiler import fleet
......
......@@ -24,7 +24,7 @@ class TestFleetMetaOptimizer(unittest.TestCase):
"PADDLE_TRAINER_ENDPOINTS"] = "127.0.0.1:36001,127.0.0.1:36002"
def test_pipeline_optimizer(self):
import paddle.fleet as fleet
import paddle.distributed.fleet as fleet
import paddle.fluid.incubate.fleet.base.role_maker as role_maker
role = role_maker.PaddleCloudRoleMaker(is_collective=True)
fleet.init(role)
......@@ -49,7 +49,7 @@ class TestFleetMetaOptimizer(unittest.TestCase):
input=prediction, label=input_y)
avg_cost = paddle.fluid.layers.mean(x=cost)
strategy = paddle.fleet.DistributedStrategy()
strategy = paddle.distributed.fleet.DistributedStrategy()
strategy.pipeline = True
strategy.pipeline_configs = {'micro_batch': 2}
......
......@@ -36,7 +36,7 @@ class TestFleetPrivateFunction(unittest.TestCase):
thr = threading.Thread(target=init_server, args=(9292, ))
thr.start()
import paddle.fleet as fleet
import paddle.distributed.fleet as fleet
ep = ["127.0.0.1:9292"]
fleet.base.private_helper_function.wait_server_ready(ep)
......
......@@ -26,7 +26,7 @@ class TestFleetRecomputeMetaOptimizer(unittest.TestCase):
"127.0.0.1:36001,127.0.0.2:36001"
def test_recompute_optimizer(self):
import paddle.fleet as fleet
import paddle.distributed.fleet as fleet
import paddle.fluid.incubate.fleet.base.role_maker as role_maker
role = role_maker.PaddleCloudRoleMaker(is_collective=True)
fleet.init(role)
......@@ -41,7 +41,7 @@ class TestFleetRecomputeMetaOptimizer(unittest.TestCase):
input=prediction, label=input_y)
avg_cost = paddle.fluid.layers.mean(x=cost)
strategy = paddle.fleet.DistributedStrategy()
strategy = paddle.distributed.fleet.DistributedStrategy()
strategy.recompute = True
strategy.recompute_configs = {"checkpoints": ["fc2"]}
......
......@@ -163,7 +163,7 @@ class TestCloudRoleMaker2(unittest.TestCase):
data = "1 1 1 1\n"
f.write(data)
dataset = paddle.fleet.DatasetFactory().create_dataset(
dataset = paddle.distributed.fleet.DatasetFactory().create_dataset(
"InMemoryDataset")
dataset.set_filelist(["test_fleet_gloo_role_maker_1.txt"])
dataset.set_use_var([show, label])
......
......@@ -40,9 +40,9 @@ class TestCloudRoleMaker(unittest.TestCase):
from paddle.fluid.incubate.fleet.parameter_server.pslib import PSLib
from paddle.fluid.incubate.fleet.base.role_maker import \
GeneralRoleMaker
from paddle.fleet.utils import KVHandler
from paddle.fleet.utils import KVServer
from paddle.fleet.utils import KVHTTPServer
from paddle.distributed.fleet.utils import KVHandler
from paddle.distributed.fleet.utils import KVServer
from paddle.distributed.fleet.utils import KVHTTPServer
except:
print("warning: no fleet, skip test_pslib_4")
return
......
......@@ -16,7 +16,7 @@
from __future__ import print_function
import os
import unittest
import paddle.fleet.base.role_maker as role_maker
import paddle.distributed.fleet.base.role_maker as role_maker
class TestRoleMakerBase(unittest.TestCase):
......
......@@ -19,16 +19,17 @@ import os
class TestFleetRuntime(unittest.TestCase):
def test_fleet_runtime_base(self):
import paddle.fleet.runtime
base = paddle.fleet.runtime.runtime_base.RuntimeBase()
import paddle.distributed.fleet.runtime
base = paddle.distributed.fleet.runtime.runtime_base.RuntimeBase()
base._run_worker()
base._init_server()
base._run_server()
base._stop_worker()
def test_fleet_collective_runtime(self):
import paddle.fleet.runtime
collective_runtime = paddle.fleet.runtime.CollectiveRuntime()
import paddle.distributed.fleet.runtime
collective_runtime = paddle.distributed.fleet.runtime.CollectiveRuntime(
)
collective_runtime._init_worker()
collective_runtime._run_worker()
collective_runtime._init_worker()
......
......@@ -22,8 +22,8 @@ import tempfile
import os
import sys
from paddle.dataset.common import download, DATA_HOME
from paddle.fleet.base.util_factory import fleet_util
import paddle.fleet.base.role_maker as role_maker
from paddle.distributed.fleet.base.util_factory import fleet_util
import paddle.distributed.fleet.base.role_maker as role_maker
class TestFleetUtil(unittest.TestCase):
......@@ -34,7 +34,7 @@ class TestFleetUtil(unittest.TestCase):
train_dir = os.path.join("fleet_util_data", "train_program")
def test_util_base(self):
import paddle.fleet as fleet
import paddle.distributed.fleet as fleet
util = fleet.UtilBase()
strategy = fleet.DistributedStrategy()
util._set_strategy(strategy)
......@@ -42,7 +42,7 @@ class TestFleetUtil(unittest.TestCase):
util._set_role_maker(role_maker)
def test_util_factory(self):
import paddle.fleet as fleet
import paddle.distributed.fleet as fleet
factory = fleet.base.util_factory.UtilFactory()
strategy = fleet.DistributedStrategy()
role_maker = None # should be fleet.PaddleCloudRoleMaker()
......@@ -55,7 +55,7 @@ class TestFleetUtil(unittest.TestCase):
self.assertEqual(util.role_maker, None)
def test_get_util(self):
import paddle.fleet as fleet
import paddle.distributed.fleet as fleet
import paddle.fluid.incubate.fleet.base.role_maker as role_maker
role = role_maker.PaddleCloudRoleMaker(is_collective=True)
fleet.init(role)
......@@ -63,7 +63,7 @@ class TestFleetUtil(unittest.TestCase):
self.assertEqual(default_util, None)
def test_set_user_defined_util(self):
import paddle.fleet as fleet
import paddle.distributed.fleet as fleet
class UserDefinedUtil(fleet.UtilBase):
def __init__(self):
......@@ -81,7 +81,7 @@ class TestFleetUtil(unittest.TestCase):
self.assertEqual(user_id, 10)
def test_fs(self):
from paddle.fleet.utils import LocalFS
from paddle.distributed.fleet.utils import LocalFS
fs = LocalFS()
dirs, files = fs.ls_dir("test_tmp")
dirs, files = fs.ls_dir("./")
......
......@@ -20,7 +20,7 @@ import os
import sys
import inspect
from paddle.fleet.utils import LocalFS, FS, HDFSClient, FSTimeOut, FSFileExistsError, FSFileNotExistsError
from paddle.distributed.fleet.utils import LocalFS, FS, HDFSClient, FSTimeOut, FSFileExistsError, FSFileNotExistsError
class FSTest(unittest.TestCase):
......
......@@ -19,7 +19,7 @@ from paddle.fluid.incubate.fleet.collective import CollectiveOptimizer, fleet
import os
import sys
from paddle.fleet.utils import LocalFS, HDFSClient, FSTimeOut, FSFileExistsError, FSFileNotExistsError
from paddle.distributed.fleet.utils import LocalFS, HDFSClient, FSTimeOut, FSFileExistsError, FSFileNotExistsError
java_home = os.environ["JAVA_HOME"]
......
......@@ -52,7 +52,7 @@ class TestDatasetWithStat(unittest.TestCase):
name=slot, shape=[1], dtype="int64", lod_level=1)
slots_vars.append(var)
dataset = paddle.fleet.DatasetFactory().create_dataset(
dataset = paddle.distributed.fleet.DatasetFactory().create_dataset(
"InMemoryDataset")
dataset.set_batch_size(32)
dataset.set_thread(3)
......
......@@ -145,14 +145,14 @@ packages=['paddle',
'paddle.incubate',
'paddle.incubate.complex',
'paddle.incubate.complex.tensor',
'paddle.fleet',
'paddle.fleet.base',
'paddle.fleet.meta_optimizers',
'paddle.fleet.runtime',
'paddle.fleet.dataset',
'paddle.fleet.metrics',
'paddle.fleet.proto',
'paddle.fleet.utils',
'paddle.distributed.fleet',
'paddle.distributed.fleet.base',
'paddle.distributed.fleet.meta_optimizers',
'paddle.distributed.fleet.runtime',
'paddle.distributed.fleet.dataset',
'paddle.distributed.fleet.metrics',
'paddle.distributed.fleet.proto',
'paddle.distributed.fleet.utils',
'paddle.framework',
'paddle.jit',
'paddle.fluid',
......@@ -482,7 +482,7 @@ with redirect_stdout():
},
entry_points={
'console_scripts': [
'fleetrun = paddle.fleet.launch:launch'
'fleetrun = paddle.distributed.fleet.launch:launch'
]
}
)
......
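The entry-point change at the end of setup.py means the `fleetrun` console script now resolves into the relocated launcher package. A hedged sketch of what that mapping points at (the module and function names come directly from the `'fleetrun = paddle.distributed.fleet.launch:launch'` line above; launch arguments are normally supplied on the command line):

    # what the 'fleetrun' console script calls after this commit
    from paddle.distributed.fleet.launch import launch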