/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/framework/parallel_executor.h"
#include <algorithm>
#include <string>
#include <tuple>
#include <vector>
#include "paddle/fluid/framework/ir/graph_helper.h"

#include "paddle/fluid/framework/ir/graph.h"

#include "paddle/fluid/framework/details/all_reduce_deps_pass.h"
#include "paddle/fluid/framework/details/async_ssa_graph_executor.h"
#include "paddle/fluid/framework/details/fast_threaded_ssa_graph_executor.h"
#include "paddle/fluid/framework/details/multi_devices_helper.h"
#include "paddle/fluid/framework/details/parallel_ssa_graph_executor.h"
#include "paddle/fluid/framework/details/reference_count_pass_helper.h"
#include "paddle/fluid/framework/details/scope_buffered_ssa_graph_executor.h"
#include "paddle/fluid/framework/details/threaded_ssa_graph_executor.h"
#include "paddle/fluid/platform/profiler.h"

#ifdef WITH_GPERFTOOLS
#include "gperftools/profiler.h"
#endif
DEFINE_string(pe_profile_fname, "",
              "Profiler filename for PE, which is generated by gperftools. "
              "Only valid when compiled with `WITH_GPERFTOOLS=ON`. Empty means "
              "the profiler is disabled.");
DEFINE_bool(enable_parallel_graph, false,
            "Force disable parallel graph execution mode if set to false.");

namespace paddle {
namespace framework {

static std::once_flag gProfileOnce;
#ifdef WITH_GPERFTOOLS
static bool gProfileStarted = false;
#endif
class ParallelExecutorPrivate {
 public:
  explicit ParallelExecutorPrivate(const std::vector<platform::Place> &places)
      : places_(places) {
    if (!FLAGS_pe_profile_fname.empty()) {
      std::call_once(gProfileOnce, [] {
#ifdef WITH_GPERFTOOLS
        ProfilerStart(FLAGS_pe_profile_fname.c_str());
        gProfileStarted = true;
#else
        LOG(WARNING) << "Paddle is not compiled with gperftools. "
                        "FLAGS_pe_profile_fname will be ignored";
#endif
      });
    }
  }

  ~ParallelExecutorPrivate() {
    if (own_local_scope_) {
      for (size_t i = 1; i < local_scopes_.size(); ++i) {
        // Skip the first scope, since it is the global scope.
        Scope *local_scope = local_scopes_[i];
        if (global_scope_->HasKid(local_scope)) {
          global_scope_->DeleteScope(local_scope);
        }
      }
    }
  }

  std::unique_ptr<ir::Graph> PrepareGCAndRefCnts(
      std::unique_ptr<ir::Graph> graph, size_t max_memory_size);

  inline bool HasGarbageCollectors() const { return !gcs_.empty(); }

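  // Restore the per-iteration (runtime) reference counts from the static
  // counts computed at construction time, and drop the fetch targets so that
  // they are not garbage-collected while the caller still needs them.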
  void ResetRuntimeReferenceCount(const std::vector<std::string> &fetch_tensors,
                                  const std::string &fetched_var_name) {
    for (size_t i = 0; i < runtime_ref_cnts_.size(); ++i) {
      for (auto &pair : global_ref_cnts_[i]) {
        runtime_ref_cnts_[i][pair.first] = pair.second;
      }

      for (auto &fetch_name : fetch_tensors) {
        runtime_ref_cnts_[i].erase(fetch_name);
      }
      runtime_ref_cnts_[i].erase(fetched_var_name);
    }
  }

  BuildStrategy build_strategy_;
  std::vector<platform::Place> places_;
  std::vector<Scope *> local_scopes_;
  Scope *global_scope_;  // not owned
  std::unique_ptr<details::SSAGraphExecutor> executor_;

#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32)
  std::unique_ptr<platform::NCCLContextMap> nccl_ctxs_;
#endif
  bool own_local_scope_;
  bool use_cuda_;
  bool use_all_reduce_;
  size_t nranks_;

  // global_ref_cnts_ is initialized once when the ParallelExecutor is
  // constructed and then kept unchanged.
  // Before each iteration, runtime_ref_cnts_ is reset to global_ref_cnts_.
  std::vector<details::ReferenceCountMap> global_ref_cnts_;
  std::vector<details::AtomicReferenceCountMap> runtime_ref_cnts_;
  details::GarbageCollectorMap gcs_;
};

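// Create one garbage collector per place (if not already present) and wire
// the reference_count_pass and eager_deletion_pass into the graph, so that
// variables can be freed as soon as their last reader has run.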
std::unique_ptr<ir::Graph> ParallelExecutorPrivate::PrepareGCAndRefCnts(
    std::unique_ptr<ir::Graph> graph, size_t max_memory_size) {
  for (size_t i = 0; i < places_.size(); ++i) {
    auto &place = places_[i];
    if (gcs_.count(place) > 0) {
      continue;
    }
    std::unique_ptr<GarbageCollector> gc;
#ifdef PADDLE_WITH_CUDA
    if (platform::is_gpu_place(place)) {
      if (IsFastEagerDeletionModeEnabled()) {
        gc.reset(new UnsafeFastGPUGarbageCollector(
            boost::get<platform::CUDAPlace>(place), max_memory_size));
      } else {
        gc.reset(new StreamGarbageCollector(
            boost::get<platform::CUDAPlace>(place), max_memory_size));
      }
      VLOG(10) << "Created " << i << "-th GarbageCollector at " << place;
    } else {
#endif
      if (platform::is_cpu_place(place)) {
        gc.reset(new CPUGarbageCollector(boost::get<platform::CPUPlace>(place),
                                         max_memory_size));
        VLOG(10) << "Created GarbageCollector at " << place;
      } else {
        PADDLE_THROW("Unsupported place for garbage collection");
      }
#ifdef PADDLE_WITH_CUDA
    }
#endif

    gcs_.emplace(place, std::move(gc));
  }

  if (!gcs_.empty()) {
    std::vector<details::LastLiveOpsOfVars> last_live_ops_of_vars;

    auto ref_cnt_pass =
        ir::PassRegistry::Instance().Get("reference_count_pass");
    ref_cnt_pass->SetNotOwned(details::kGlobalReferenceCount,
                              &global_ref_cnts_);
    ref_cnt_pass->SetNotOwned(details::kLastLiveOpsOfVars,
                              &last_live_ops_of_vars);
    graph = ref_cnt_pass->Apply(std::move(graph));
    VLOG(10) << "ReferenceCountPass Applied";

    auto eager_deletion_pass =
        ir::PassRegistry::Instance().Get("eager_deletion_pass");
    eager_deletion_pass->SetNotOwned(details::kRuntimeReferenceCount,
                                     &runtime_ref_cnts_);
    eager_deletion_pass->SetNotOwned(details::kGarbageCollector, &gcs_);
    eager_deletion_pass->SetNotOwned(details::kLastLiveOpsOfVars,
                                     &last_live_ops_of_vars);
    eager_deletion_pass->SetNotOwned(details::kAllPlaces, &places_);
    graph = eager_deletion_pass->Apply(std::move(graph));
    VLOG(10) << "EagerDeletionPass Applied";
  }

  return graph;
}

std::vector<Scope *> &ParallelExecutor::GetLocalScopes() {
  return member_->local_scopes_;
}

ParallelExecutor::ParallelExecutor(
    const std::vector<platform::Place> &places,
    const std::unordered_set<std::string> &bcast_vars,
    const std::string &loss_var_name, Scope *scope,
    const std::vector<Scope *> &local_scopes,
    const ExecutionStrategy &exec_strategy, const BuildStrategy &build_strategy,
    ir::Graph *graph)
    : member_(new ParallelExecutorPrivate(places)) {
  member_->global_scope_ = scope;
  member_->use_cuda_ = exec_strategy.use_cuda_;
  member_->build_strategy_ = build_strategy;
  member_->use_all_reduce_ =
      build_strategy.reduce_ == BuildStrategy::ReduceStrategy::kAllReduce;
  member_->nranks_ = build_strategy.num_trainers_ * places.size();
  if (!member_->use_all_reduce_) {
    PADDLE_ENFORCE(places.size() > 1,
                   "If you set build_strategy.reduce to 'Reduce', "
                   "the number of places must be greater than 1.");
  }

  // Step 1. Broadcast the bcast_vars to all devices.
  // Create local scopes
  if (local_scopes.empty()) {
    member_->own_local_scope_ = true;
    member_->local_scopes_.emplace_back(member_->global_scope_);
    for (size_t i = 1; i < member_->places_.size(); ++i) {
      member_->local_scopes_.emplace_back(&scope->NewScope());
    }
  } else {
    member_->own_local_scope_ = false;
    PADDLE_ENFORCE_EQ(member_->places_.size(), local_scopes.size());
    for (size_t i = 0; i < member_->places_.size(); ++i) {
      member_->local_scopes_.emplace_back(&local_scopes[i]->NewScope());
    }
  }

  if (build_strategy.async_mode_) {
    PADDLE_ENFORCE(!member_->use_cuda_,
                   "gpu mode does not support async_mode_ now!");
  }
  std::unique_ptr<ir::Graph> temp_owned_graph(graph);

  // FIXME(Yancey1989): parallel graph mode gets better performance
  // in GPU allreduce distributed training. Need an elegant way to
  // choose the execution strategy.
  build_strategy.enable_parallel_graph_ = EnableParallelGraphExecution(
      *temp_owned_graph, exec_strategy, build_strategy);
  if (build_strategy.enable_parallel_graph_)
    VLOG(0) << "The Executor will execute the graph with ParallelGraph "
               "Execution, which can achieve better performance. "
            << "You can force it off with env FLAGS_enable_parallel_graph=0";

  if (member_->use_cuda_) {
// Bcast Parameters to all GPUs
#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32)
    ncclUniqueId *nccl_id = nullptr;
    // gen_nccl_id operator can broadcast the ncclUniqueId for nccl2 collective
    // distributed training
    auto *nccl_id_var = scope->FindVar(NCCL_ID_VARNAME);
    if (nccl_id_var != nullptr) {
      nccl_id = nccl_id_var->GetMutable<ncclUniqueId>();
    }
    if (build_strategy.enable_parallel_graph_ && member_->nranks_ > 1UL) {
      if (nccl_id == nullptr) {
        local_nccl_id_.reset(new ncclUniqueId());
        platform::dynload::ncclGetUniqueId(local_nccl_id_.get());
        nccl_id = local_nccl_id_.get();
      }
    }

    member_->nccl_ctxs_.reset(new platform::NCCLContextMap(
        member_->places_, nccl_id, build_strategy.num_trainers_,
        build_strategy.trainer_id_));
#else
    PADDLE_THROW("Not compiled with CUDA");
#endif
  }
  if (member_->local_scopes_.size() != 1 && local_scopes.empty()) {
    BCastParamsToDevices(bcast_vars);
  }
// Startup Program has been run. All local scopes have correct parameters.

// Step 2. Convert main_program to SSA form and dependency graph. Also, insert
// ncclOp
#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32)
  if (build_strategy.async_mode_ && !build_strategy.is_distribution_) {
    VLOG(3) << "use local async mode";
    temp_owned_graph = build_strategy.Apply(
        std::move(temp_owned_graph), {member_->places_[0]}, loss_var_name,
        {member_->local_scopes_[0]}, member_->nranks_, member_->use_cuda_,
        member_->nccl_ctxs_.get());
  } else {
    temp_owned_graph = build_strategy.Apply(
        std::move(temp_owned_graph), member_->places_, loss_var_name,
        member_->local_scopes_, member_->nranks_, member_->use_cuda_,
        member_->nccl_ctxs_.get());
  }
#else
  if (build_strategy.async_mode_ && !build_strategy.is_distribution_) {
    VLOG(3) << "use local async mode";
    temp_owned_graph = build_strategy.Apply(
        std::move(temp_owned_graph), {member_->places_[0]}, loss_var_name,
        {member_->local_scopes_[0]}, member_->nranks_, member_->use_cuda_);
  } else {
    temp_owned_graph = build_strategy.Apply(
        std::move(temp_owned_graph), member_->places_, loss_var_name,
        member_->local_scopes_, member_->nranks_, member_->use_cuda_);
  }

#endif
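  // When an eager-deletion threshold is set (max_memory_size >= 0), attach
  // the garbage collectors and reference-counting passes to the graph; in
  // either case ownership of the graph is handed back to the raw `graph`
  // pointer.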
  auto max_memory_size = GetEagerDeletionThreshold();
  VLOG(10) << "Eager Deletion Threshold "
           << static_cast<float>(max_memory_size) / (1 << 30);
  if (max_memory_size >= 0) {
    graph = member_
                ->PrepareGCAndRefCnts(std::move(temp_owned_graph),
                                      static_cast<size_t>(max_memory_size))
                .release();
  } else {
    graph = temp_owned_graph.release();
  }

  // Step 3. Create vars in each scope. Passes may also create new vars.
  //         skip control vars and empty vars
  std::vector<details::VariableInfo> var_infos;
  for (auto &node : graph->Nodes()) {
    if (node->IsVar() && !node->IsCtrlVar() && node->Var()) {
      var_infos.emplace_back();
      var_infos.back().name_ = node->Var()->Name();
      var_infos.back().type_ = node->Var()->GetType();
      var_infos.back().persistable_ = node->Var()->Persistable();
    }
  }

  // If loss_var_name is given, there should be only one graph.
  if (loss_var_name.size()) {
    size_t graph_num = ir::GraphNum(*graph);
    if (graph_num > 1) {
      LOG(WARNING)
          << "There should be only one graph, "
             "but the current graph has "
          << ir::GraphNum(*graph)
          << " sub_graphs. If you want to see the nodes of the "
             "sub_graphs, you should use 'FLAGS_print_sub_graph_dir' "
             "to specify the output dir. NOTE: if you are not doing training, "
             "please don't pass loss_var_name.";
    }
  }

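  // Choose the executor: AsyncSSAGraphExecutor for local async mode,
  // ParallelSSAGraphExecutor when parallel-graph execution is enabled,
  // otherwise Threaded or FastThreaded depending on the execution strategy.
  // Whichever is chosen is finally wrapped in a ScopeBufferedSSAGraphExecutor.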
  if (build_strategy.async_mode_ && !build_strategy.is_distribution_) {
    VLOG(3) << "use AsyncSSAGraphExecutor";
    member_->executor_.reset(new details::AsyncSSAGraphExecutor(
        exec_strategy, member_->local_scopes_, member_->places_, graph));
  } else if (build_strategy.enable_parallel_graph_) {
    VLOG(3) << "use ParallelSSAGraphExecutor";
#ifdef PADDLE_WITH_CUDA
    // TODO(Yancey1989): Remove passing in the main_program when
    // allreduce_seq_pass doesn't need it as the attr.
    member_->executor_.reset(new details::ParallelSSAGraphExecutor(
        exec_strategy, member_->local_scopes_, member_->places_, graph));
#else
    PADDLE_THROW(
        "Paddle should be compiled with CUDA for ParallelGraph Execution.");
#endif
  } else {
    if (exec_strategy.type_ == ExecutionStrategy::kDefault) {
      VLOG(3) << "use ThreadedSSAGraphExecutor";
      member_->executor_.reset(new details::ThreadedSSAGraphExecutor(
          exec_strategy, member_->local_scopes_, member_->places_, graph));
    } else {
      VLOG(3) << "use FastThreadedSSAGraphExecutor";
      member_->executor_.reset(new details::FastThreadedSSAGraphExecutor(
          exec_strategy, member_->local_scopes_, member_->places_, graph));
    }
  }

  VLOG(3) << "use ScopeBufferedSSAGraphExecutor";
  member_->executor_.reset(new details::ScopeBufferedSSAGraphExecutor(
      exec_strategy, member_->local_scopes_, std::move(var_infos),
      member_->places_, std::move(member_->executor_)));
}

void ParallelExecutor::BCastParamsToDevices(
    const std::unordered_set<std::string> &vars) const {
  VLOG(3) << "BCastParamsToDevices";
  // For the initializing bcast, all vars are broadcast from device(0).
  for (auto &var : vars) {
    framework::Variable *main_var = member_->local_scopes_[0]->FindVar(var);
    if (main_var == nullptr || !main_var->IsType<LoDTensor>()) {
      continue;
    }

    auto &main_tensor = main_var->Get<LoDTensor>();
    if (!main_tensor.IsInitialized()) {
      VLOG(3) << "var " << var << " is not initialized, skip!";
      continue;
    }
    auto &dims = main_tensor.dims();
    if (paddle::platform::is_gpu_place(main_tensor.place())) {
#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32)
      std::vector<void *> buffers;
      buffers.reserve(member_->places_.size());
      size_t numel = main_tensor.numel();
      ncclDataType_t data_type = platform::ToNCCLDataType(main_tensor.type());
      for (size_t i = 0; i < member_->places_.size(); ++i) {
        auto place = member_->places_[i];
        void *buffer;

        if (i == 0) {
          buffer = const_cast<void *>(main_tensor.data<void>());
        } else {
          auto local_scope = member_->local_scopes_[i];
          auto *t = local_scope->Var(var)->GetMutable<LoDTensor>();
          t->Resize(dims);
          buffer = t->mutable_data(place, main_tensor.type());
        }
        buffers.push_back(buffer);
      }

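      // Broadcast from device 0 within a single NCCL group call: buffers[0]
      // points at the source tensor, the others at newly allocated tensors.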
      PADDLE_ENFORCE_EQ(member_->places_.size(), buffers.size(),
                        "variables' buffer size to bcast NOT equal to places");
      {
        platform::NCCLGroupGuard guard;
        for (size_t i = 0; i < member_->places_.size(); ++i) {
          auto &nccl_ctx = member_->nccl_ctxs_->at(member_->places_[i]);
          platform::dynload::ncclBcast(buffers[i], numel, data_type, 0,
                                       nccl_ctx.comm_, nccl_ctx.stream());
        }
        member_->nccl_ctxs_->WaitAll();
      }
#else
      PADDLE_THROW("Not compiled with CUDA");
#endif
    } else {
      platform::CPUPlace cpu;
      for (size_t i = 1; i < member_->places_.size(); ++i) {
        auto local_scope = member_->local_scopes_[i];
        auto *t = local_scope->Var(var)->GetMutable<LoDTensor>();

        auto copy_memory = [&] {
          t->Resize(dims);
          t->mutable_data(cpu, main_tensor.type());
          paddle::framework::TensorCopy(main_tensor, cpu, t);
        };

        auto share_memory = [&] { t->ShareDataWith(main_tensor); };

        // FIXME(zcd): LR_DECAY_COUNTER should not be shared. This is a hot fix.
        if (member_->build_strategy_.async_mode_) {
          share_memory();
        } else if (member_->use_all_reduce_ || member_->use_cuda_ ||
                   var == "@LR_DECAY_COUNTER@") {
          copy_memory();
        } else {
          share_memory();
        }
      }
    }
  }
}

void ParallelExecutor::Run(const std::vector<std::string> &fetch_tensors,
                           const std::string &fetched_var_name) {
#ifdef WITH_GPERFTOOLS
  if (gProfileStarted) {
    ProfilerFlush();
  }
#endif

  platform::RecordBlock b(0);
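  // Reset the per-iteration reference counts before running so that eager
  // deletion starts from the static counts computed at construction time.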
  if (member_->HasGarbageCollectors()) {
    member_->ResetRuntimeReferenceCount(fetch_tensors, fetched_var_name);
  }
  auto fetch_data = member_->executor_->Run(fetch_tensors);
  *member_->global_scope_->Var(fetched_var_name)->GetMutable<FeedFetchList>() =
      fetch_data;
}

void ParallelExecutor::FeedTensorsIntoLocalScopes(
    const std::vector<std::unordered_map<std::string, LoDTensor>> &tensors) {
  PADDLE_ENFORCE_EQ(member_->local_scopes_.size(), tensors.size());

  for (size_t i = 0; i < tensors.size(); ++i) {
    auto &map = tensors[i];
    auto *scope = member_->local_scopes_[i];
    for (auto &pair : map) {
      auto *trg = scope->Var(pair.first)->GetMutable<LoDTensor>();
      trg->ShareDataWith(pair.second);
      trg->set_lod(pair.second.lod());
    }
  }
}

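// Split each fed tensor across the places (one piece per device) and share
// the pieces into the corresponding local scopes.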
void ParallelExecutor::FeedAndSplitTensorIntoLocalScopes(
    const std::unordered_map<std::string, LoDTensor> &tensors) {
  for (auto pair : tensors) {
    auto lod_tensors = pair.second.SplitLoDTensor(member_->places_);
    PADDLE_ENFORCE_EQ(
        member_->places_.size(), lod_tensors.size(),
        "The number of samples of current batch is less than the count of "
        "devices, currently, it is not allowed. (%d vs %d)",
        member_->places_.size(), lod_tensors.size());
    for (size_t j = 0; j < member_->places_.size(); ++j) {
      // TODO(panxy0718): Do I need to delete this var?
      auto t =
          member_->local_scopes_[j]->Var(pair.first)->GetMutable<LoDTensor>();
      t->ShareDataWith(lod_tensors[j]);
      t->set_lod(lod_tensors[j].lod());
    }
  }
}

ParallelExecutor::~ParallelExecutor() {
  for (auto &p : member_->places_) {
    platform::DeviceContextPool::Instance().Get(p)->Wait();
  }
  delete member_;
}

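// Parallel-graph execution is considered only when FLAGS_enable_parallel_graph
// is set; it is disabled if the graph contains SELECTED_ROWS variables or
// send/recv ops, and may also be disabled based on the reduce strategy,
// device type, and execution strategy checked below.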
bool ParallelExecutor::EnableParallelGraphExecution(
    const ir::Graph &graph, const ExecutionStrategy &exec_strategy,
    const BuildStrategy &build_strategy) const {
  if (!FLAGS_enable_parallel_graph) return false;

  bool enable_parallel_graph = true;

  for (ir::Node *node : graph.Nodes()) {
    if (node->IsVar() && node->Var()) {
      // TODO(Yancey1989): support sparse update in ParallelGraph mode.
      if (node->Var()->GetType() == proto::VarType::SELECTED_ROWS) {
        enable_parallel_graph = false;
        break;
      }
    } else if (node->IsOp() && node->Op()) {
      // TODO(Yancey1989): support pserver mode
      if (node->Op()->Type() == "send" || node->Op()->Type() == "recv") {
        enable_parallel_graph = false;
        break;
      }
    }
  }

  if (!member_->use_all_reduce_ || !member_->use_cuda_) {
    if (build_strategy.enable_sequential_execution_ ||
        exec_strategy.type_ == ExecutionStrategy::ExecutorType::kExperimental) {
      enable_parallel_graph = false;
    }
  }
  return enable_parallel_graph;
}

}  // namespace framework
}  // namespace paddle

USE_PASS(reference_count_pass);
USE_PASS(eager_deletion_pass);