diff --git a/python/paddle/distributed/fleet/base/fleet_base.py b/python/paddle/distributed/fleet/base/fleet_base.py
index 9ea912c78c56a98d662d2e0e5058ee890a0cc2ed..1a4b79e6ae1ca9444ab3827decbcc6847f33d27c 100644
--- a/python/paddle/distributed/fleet/base/fleet_base.py
+++ b/python/paddle/distributed/fleet/base/fleet_base.py
@@ -179,6 +179,9 @@ class Fleet(object):
                 fleet.init(strategy=strategy)
 
         """
+        if strategy is None:
+            strategy = DistributedStrategy()
+        self._user_defined_strategy = copy.deepcopy(strategy)
 
         if role_maker is None:
             if isinstance(is_collective, bool):
@@ -220,10 +223,6 @@
             else:
                 paddle.distributed.init_parallel_env()
 
-        if strategy is None:
-            strategy = DistributedStrategy()
-        self._user_defined_strategy = copy.deepcopy(strategy)
-
     def is_first_worker(self):
         """
         Check whether the node is the first instance of worker.
diff --git a/python/paddle/fluid/dygraph/parallel.py b/python/paddle/fluid/dygraph/parallel.py
index 852684cb95d1ad678e5c914b48ab71919a2af55d..d7576ddc70a2760606fbfacf16ed7a0e826ca63f 100644
--- a/python/paddle/fluid/dygraph/parallel.py
+++ b/python/paddle/fluid/dygraph/parallel.py
@@ -395,11 +395,10 @@ class DataParallel(layers.Layer):
                 1024)
             self.init_reducer()
         else:
-            warnings.warn(
-                "nranks is less than 2, "
-                "maybe you need to check the current system environment."
-                " Need to use spawn or fleetrun to "
-                "start distributed programs.")
+            warnings.warn("The program will run on a single card. Please "
+                          "check: 1. whether spawn or fleetrun was used to "
+                          "start the program; 2. whether it is a multi-card "
+                          "program; 3. whether the environment is multi-card.")
 
     def init_reducer(self):
         layers_param = []
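
Note (not part of the patch): the snippet below is a minimal, self-contained sketch of the ordering issue the fleet_base.py hunks fix. `Fleet`, `DistributedStrategy`, and `_setup_env` here are stand-ins invented for illustration, not Paddle's real classes; the point is only that `_user_defined_strategy` must be assigned before any setup step that reads it.

    import copy

    class DistributedStrategy:  # stand-in for fleet.DistributedStrategy
        pass

    class Fleet:  # stand-in for fleet.Fleet
        def init(self, strategy=None):
            # After the patch: default and record the strategy first ...
            if strategy is None:
                strategy = DistributedStrategy()
            self._user_defined_strategy = copy.deepcopy(strategy)
            # ... so later setup code can safely read it. Before the
            # patch this assignment came after the environment setup,
            # so a setup step reading the attribute hit an AttributeError.
            self._setup_env()

        def _setup_env(self):  # hypothetical stand-in for env setup
            assert hasattr(self, "_user_defined_strategy")

    Fleet().init()  # works with or without an explicit strategy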