diff --git a/paddle/fluid/framework/details/all_reduce_op_handle.cc b/paddle/fluid/framework/details/all_reduce_op_handle.cc
index f4c21c795855df71c46bb81e92223fd3b6b01706..535bcd46e8281d5a7b417f9e3b7c4b2127cd0739 100644
--- a/paddle/fluid/framework/details/all_reduce_op_handle.cc
+++ b/paddle/fluid/framework/details/all_reduce_op_handle.cc
@@ -107,7 +107,7 @@ void AllReduceOpHandle::RunImpl() {
       auto &trg = *this->local_scopes_[0]
                        ->FindVar(kLocalExecScopeName)
                        ->Get<Scope *>()
-                       ->Var()
+                       ->FindVar(in_var_handles[0]->name_)
                        ->GetMutable<LoDTensor>();
 
       // Reduce All Tensor to trg in CPU
diff --git a/python/paddle/fluid/parallel_executor.py b/python/paddle/fluid/parallel_executor.py
index 11e4353d44c5716fa3b877828774339ac156641a..50fc085d672a4541b0d52ed47ef1ba82268e92b1 100644
--- a/python/paddle/fluid/parallel_executor.py
+++ b/python/paddle/fluid/parallel_executor.py
@@ -121,8 +121,9 @@ class ParallelExecutor(object):
             # performance. Worth tunning for other models in the future.
             exec_strategy.num_threads = len(self._places) * 4
         else:
-            # Currently num_threads must be 1.
-            exec_strategy.num_threads = 1
+            cpu_num = int(
+                os.environ.get('CPU_NUM', multiprocessing.cpu_count()))
+            exec_strategy.num_threads = cpu_num
 
         if build_strategy is None:
             build_strategy = BuildStrategy()