Unverified · Commit b6bc4cb5 authored by ShenLiang, committed by GitHub

support dp run single card (#29358) (#29372)

Parent 374822b6
@@ -179,6 +179,9 @@ class Fleet(object):
                 fleet.init(strategy=strategy)
         """
+        if strategy is None:
+            strategy = DistributedStrategy()
+        self._user_defined_strategy = copy.deepcopy(strategy)
         if role_maker is None:
             if isinstance(is_collective, bool):
@@ -220,10 +223,6 @@ class Fleet(object):
             else:
                 paddle.distributed.init_parallel_env()
-        if strategy is None:
-            strategy = DistributedStrategy()
-        self._user_defined_strategy = copy.deepcopy(strategy)

     def is_first_worker(self):
         """
         Check whether the node is the first instance of worker.
......
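The reordering above creates and deep-copies the default DistributedStrategy before any role-maker or parallel-environment setup, so a single-card process still ends up with a usable strategy object. A minimal usage sketch under that assumption (ordinary fleet API calls, not part of this diff):

    import paddle.distributed.fleet as fleet

    # Plain collective init; with this change the strategy default is assigned
    # even when the process is started directly on one card.
    strategy = fleet.DistributedStrategy()
    fleet.init(is_collective=True, strategy=strategy)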
@@ -395,11 +395,10 @@ class DataParallel(layers.Layer):
                                                  1024)
             self.init_reducer()
         else:
-            warnings.warn(
-                "nranks is less than 2, "
-                "maybe you need to check the current system environment."
-                " Need to use spawn or fleetrun to "
-                "start distributed programs.")
+            warnings.warn("The program will return to single-card operation. "
+                          "Please check 1, whether you use spawn or fleetrun "
+                          "to start the program. 2. Whether it is a multi-card "
+                          "program. 3. Is the current environment multi-card.")

     def init_reducer(self):
         layers_param = []
......
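With this branch, DataParallel only warns and then falls back to single-card execution, which is what the commit title refers to. A minimal sketch of the resulting behavior, assuming the process is launched directly rather than via spawn or fleetrun (so nranks stays 1):

    import paddle
    import paddle.nn as nn

    # Wrapping a model on a single card now just emits the warning above and
    # keeps running the wrapped layers on that card.
    model = paddle.DataParallel(nn.Linear(10, 1))
    out = model(paddle.randn([4, 10]))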