diff --git a/paddle/fluid/framework/details/build_strategy.cc b/paddle/fluid/framework/details/build_strategy.cc
index 010c8dee6c414027c0a5665f69692f21b298d297..a6359402f8dc40b3006ba6a21370a3af84ba0f9c 100644
--- a/paddle/fluid/framework/details/build_strategy.cc
+++ b/paddle/fluid/framework/details/build_strategy.cc
@@ -133,15 +133,15 @@ class ParallelExecutorPassBuilder : public ir::PassBuilder {
   void AppendMultiDevPass(const BuildStrategy &strategy) {
     ir::Pass *multi_devices_pass;
     if (strategy_.is_distribution_) {
-      VLOG(3) << "multi device dist train mode";
+      VLOG(3) << "multi device parameter server mode";
       multi_devices_pass = AppendPass("dist_multi_devices_pass").get();
     } else {
       if (strategy.reduce_ == BuildStrategy::ReduceStrategy::kAllReduce) {
-        VLOG(3) << "multi device allreduce mode";
+        VLOG(3) << "multi devices collective mode with allreduce";
         multi_devices_pass =
             AppendPass("allreduce_mode_multi_devices_pass").get();
       } else if (strategy.reduce_ == BuildStrategy::ReduceStrategy::kReduce) {
-        VLOG(3) << "multi device reduce mode";
+        VLOG(3) << "multi devices collective mode with reduce";
         multi_devices_pass = AppendPass("reduce_mode_multi_devices_pass").get();
       } else {
         PADDLE_THROW("Unknown reduce strategy.");
diff --git a/python/paddle/fluid/compiler.py b/python/paddle/fluid/compiler.py
index 2b69fd89a2c19f933a7ff7bb3bcc4bbb2c49e7e9..d253f0cca8eecbd97aa033ec7209b6fcdb36aa08 100644
--- a/python/paddle/fluid/compiler.py
+++ b/python/paddle/fluid/compiler.py
@@ -35,15 +35,6 @@ def _place_obj(place):
     return p
 
 
-def _is_pserver_mode(main_program):
-    main = main_program if main_program \
-        else framework.default_main_program()
-    for op in main.global_block().ops:
-        if op.type in ["send", "recv"]:
-            return True
-    return False
-
-
 class CompiledProgram(object):
     """
     Compiles a Program for execution.
@@ -120,7 +111,8 @@ class CompiledProgram(object):
             self._exec_strategy = ExecutionStrategy()
         if self._build_strategy is None:
             self._build_strategy = BuildStrategy()
-        self._build_strategy.is_distribution = _is_pserver_mode(self._program)
+        self._build_strategy.is_distribution = framework.is_pserver_mode(
+            self._program)
         return self
 
     def with_inference_optimize(self, config):
diff --git a/python/paddle/fluid/framework.py b/python/paddle/fluid/framework.py
index 832c97c7deb49b4e118e15989ab7a34da6ce57a0..162e94ec59434bfcf018d73e0c49f9cc21e81185 100644
--- a/python/paddle/fluid/framework.py
+++ b/python/paddle/fluid/framework.py
@@ -85,6 +85,15 @@ def _current_expected_place():
     return _imperative_current_expected_place_
 
 
+def is_pserver_mode(main_program):
+    main = main_program if main_program \
+        else default_main_program()
+    for op in main.global_block().ops:
+        if op.type in ["send", "recv"]:
+            return True
+    return False
+
+
 class NameScope(object):
     def __init__(self, name="", parent=None):
         self._children = dict()
diff --git a/python/paddle/fluid/parallel_executor.py b/python/paddle/fluid/parallel_executor.py
index 22212ae9a216acaab3f295f1f8d091829a0aa471..9bff3599a04799d208b3d91bf65a30914c3e80aa 100644
--- a/python/paddle/fluid/parallel_executor.py
+++ b/python/paddle/fluid/parallel_executor.py
@@ -29,15 +29,6 @@ ExecutionStrategy = core.ParallelExecutor.ExecutionStrategy
 BuildStrategy = core.ParallelExecutor.BuildStrategy
 
 
-def _is_pserver_mode(main_program):
-    main = main_program if main_program \
-        else framework.default_main_program()
-    for op in main.global_block().ops:
-        if op.type in ["send", "recv"]:
-            return True
-    return False
-
-
 class ParallelExecutor(object):
     """
     ParallelExecutor is designed for data parallelism, which focuses on distributing
@@ -140,7 +131,7 @@ class ParallelExecutor(object):
         # FIXME(zcd): is_distribution_ is a temporary field, because in pserver mode,
        # num_trainers is 1, so the current fields of build_strategy doesn't tell if
         # it's distributed model.
-        build_strategy.is_distribution = _is_pserver_mode(
+        build_strategy.is_distribution = framework.is_pserver_mode(
             main_program) or num_trainers > 1
 
         # step4: get main_program, scope, local_scopes
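
For reference, a minimal sketch (not part of the patch itself) of how the relocated helper behaves. It assumes the Fluid 1.x `layers` API; `framework.is_pserver_mode` is the function added in `framework.py` above, which reports parameter-server mode whenever the program's global block contains a `send` or `recv` op:

```python
import paddle.fluid as fluid
from paddle.fluid import framework

# Build a small, purely local program with no distributed ops.
prog = fluid.Program()
startup = fluid.Program()
with fluid.program_guard(prog, startup):
    x = fluid.layers.data(name='x', shape=[1], dtype='float32')
    fluid.layers.fc(input=x, size=1)

# No "send"/"recv" ops in the global block, so this is local training.
assert not framework.is_pserver_mode(prog)

# Passing a falsy value falls back to the default main program,
# which is also plain here, so this prints False.
print(framework.is_pserver_mode(None))
```

After `DistributeTranspiler` splits a trainer program for pserver training it inserts `send`/`recv` ops, so the same check returns `True`; that is what `CompiledProgram` and `ParallelExecutor` now rely on (via the shared `framework.is_pserver_mode`) to set `build_strategy.is_distribution`, instead of each module keeping its own private copy of the check.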