Unverified commit 4064354a authored by ShenLiang, committed by GitHub

support dp run single card (#29358)

Parent 1decf4ad
@@ -179,6 +179,9 @@ class Fleet(object):
                fleet.init(strategy=strategy)
        """
+        if strategy is None:
+            strategy = DistributedStrategy()
+        self._user_defined_strategy = copy.deepcopy(strategy)
        if role_maker is None:
            if isinstance(is_collective, bool):
@@ -220,10 +223,6 @@ class Fleet(object):
            else:
                paddle.distributed.init_parallel_env()
-        if strategy is None:
-            strategy = DistributedStrategy()
-        self._user_defined_strategy = copy.deepcopy(strategy)

    def is_first_worker(self):
        """
        Check whether the node is the first instance of worker.
...
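The two hunks above move the default-strategy handling to the top of `Fleet.init`, so `self._user_defined_strategy` is populated before any distributed setup runs. A minimal usage sketch of what this enables (not part of the commit; it assumes a collective-capable single-card environment, and the layer size, optimizer, and data are illustrative):

```python
import paddle
from paddle.distributed import fleet

# strategy=None now defaults to DistributedStrategy() right away,
# even when the program was started without fleetrun or spawn.
fleet.init(is_collective=True)

layer = paddle.nn.Linear(8, 2)
optimizer = paddle.optimizer.SGD(learning_rate=0.01,
                                 parameters=layer.parameters())

# On a single card these wrappers still work; DataParallel detects
# nranks < 2 and falls back to plain single-card execution.
optimizer = fleet.distributed_optimizer(optimizer)
dp_layer = fleet.distributed_model(layer)

x = paddle.randn([4, 8], dtype="float32")
loss = dp_layer(x).mean()
loss.backward()
optimizer.step()
optimizer.clear_grad()
```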
@@ -395,11 +395,10 @@ class DataParallel(layers.Layer):
                1024)
            self.init_reducer()
        else:
-            warnings.warn(
-                "nranks is less than 2, "
-                "maybe you need to check the current system environment."
-                " Need to use spawn or fleetrun to "
-                "start distributed programs.")
+            warnings.warn("The program will return to single-card operation. "
+                          "Please check 1, whether you use spawn or fleetrun "
+                          "to start the program. 2. Whether it is a multi-card "
+                          "program. 3. Is the current environment multi-card.")

    def init_reducer(self):
        layers_param = []
...
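The replaced warning reflects the new behavior: with `nranks < 2`, `DataParallel` no longer treats a missing launcher as an error and simply runs the wrapped layer on one card. A minimal sketch of that fallback (not part of the commit; the layer shape and input are illustrative):

```python
import paddle

linear = paddle.nn.Linear(10, 1)
# Constructed without init_parallel_env(): nranks is 1, so only the
# warning above is emitted and the wrapper behaves like the bare layer.
dp_linear = paddle.DataParallel(linear)

x = paddle.randn([4, 10], dtype="float32")
out = dp_linear(x)        # forward is identical to linear(x)
out.mean().backward()     # no gradient all-reduce with a single rank
```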