/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/framework/parallel_executor.h"
#include <algorithm>
#include <memory>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
#include "paddle/fluid/framework/details/async_ssa_graph_executor.h"
#include "paddle/fluid/framework/details/fast_threaded_ssa_graph_executor.h"
#include "paddle/fluid/framework/details/multi_devices_helper.h"
#include "paddle/fluid/framework/details/parallel_ssa_graph_executor.h"
#include "paddle/fluid/framework/details/scope_buffered_ssa_graph_executor.h"
#include "paddle/fluid/framework/details/threaded_ssa_graph_executor.h"
#include "paddle/fluid/framework/ir/graph.h"
#include "paddle/fluid/framework/ir/graph_helper.h"
#include "paddle/fluid/framework/ir/memory_optimize_pass/reference_count_pass_helper.h"
#include "paddle/fluid/platform/profiler.h"

#ifdef WITH_GPERFTOOLS
#include "gperftools/profiler.h"
#endif
DEFINE_string(pe_profile_fname, "",
              "Profiler filename for PE, which is generated by gperftools. "
              "Only valid when compiled with `WITH_GPERFTOOLS=ON`. Empty if "
              "disabled.");
DEFINE_bool(enable_parallel_graph, false,
            "Force-disable parallel graph execution mode if set to false.");

namespace paddle {
namespace framework {

static std::once_flag gProfileOnce;
#ifdef WITH_GPERFTOOLS
static bool gProfileStarted = false;
#endif

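// Holds the internal state of a ParallelExecutor: the places to run on,
// per-device scopes, the underlying SSA graph executor, NCCL contexts
// (CUDA builds only), and garbage-collection bookkeeping.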
class ParallelExecutorPrivate {
 public:
  explicit ParallelExecutorPrivate(const std::vector<platform::Place> &places)
      : places_(places) {
    if (!FLAGS_pe_profile_fname.empty()) {
      std::call_once(gProfileOnce, [] {
#ifdef WITH_GPERFTOOLS
        ProfilerStart(FLAGS_pe_profile_fname.c_str());
        gProfileStarted = true;
#else
        LOG(WARNING) << "Paddle is not compiled with gperftools. "
          "FLAGS_pe_profile_fname will be ignored";
#endif
      });
    }
  }

  ~ParallelExecutorPrivate() {
    if (own_local_scope_) {
      for (size_t i = 1; i < local_scopes_.size(); ++i) {
        // Skip the first scope, since it is the global scope.
        Scope *local_scope = local_scopes_[i];
        if (global_scope_->HasKid(local_scope)) {
          global_scope_->DeleteScope(local_scope);
        }
      }
    }
  }

  ir::Graph *PrepareGCAndRefCnts(ir::Graph *graph, size_t max_memory_size);

  inline bool HasGarbageCollectors() const { return !gcs_.empty(); }

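  // Restores the runtime reference counts from the static snapshot taken at
  // construction time, then erases the fetched variables so they are not
  // garbage-collected during the iteration.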
  void ResetRuntimeReferenceCount(const std::vector<std::string> &fetch_tensors,
                                  const std::string &fetched_var_name) {
    for (size_t i = 0; i < runtime_ref_cnts_.size(); ++i) {
      for (auto &pair : global_ref_cnts_[i]) {
        runtime_ref_cnts_[i][pair.first] = pair.second;
      }

      for (auto &fetch_name : fetch_tensors) {
        runtime_ref_cnts_[i].erase(fetch_name);
      }
      runtime_ref_cnts_[i].erase(fetched_var_name);
    }
  }

#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32)
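  // Builds the NCCL communicators described by the build strategy: flat
  // communicators for plain allreduce, plus inter/exter communicators when
  // hierarchical allreduce is enabled.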
  void InitNCCLCtxs(framework::Scope *scope, const BuildStrategy &bst) {
    VLOG(1) << "nccl comm num:" << bst.nccl_comm_num_ << ", nranks:" << nranks_
            << ", num_trainers:" << bst.num_trainers_
            << ", trainer_id:" << bst.trainer_id_;

    if (bst.use_hierarchical_allreduce_) {
      VLOG(1) << "use_hierarchical_allreduce:"
              << bst.use_hierarchical_allreduce_ << ", inter_trainers_num:"
              << bst.hierarchical_allreduce_inter_nranks_
              << ", exter_trainers_num:"
              << bst.hierarchical_allreduce_exter_nranks_;
    }

    std::vector<ncclUniqueId *> flat_nccl_ids;
    if (nranks_ == 1) {
      // FIXME(gongwb): there is no need to create a ncclid when nranks == 1
      nccl_ctxs_.InitFlatCtxs(places_, flat_nccl_ids, bst.num_trainers_,
                              bst.trainer_id_);
      return;
    }

    if (bst.enable_parallel_graph_) {
      VLOG(1) << "use only one ncclid in parallel graph mode";

      ncclUniqueId *nccl_id = nullptr;

      std::string var_name = platform::GetFlatNCCLVarName(0);
      auto nccl_id_var = scope->FindVar(var_name);
      if (nccl_id_var) {
        nccl_id = nccl_id_var->GetMutable<ncclUniqueId>();
      } else {
        nccl_id = new ncclUniqueId();
        PADDLE_ENFORCE(platform::dynload::ncclGetUniqueId(nccl_id));
      }

      flat_nccl_ids.push_back(nccl_id);

      nccl_ctxs_.InitFlatCtxs(places_, flat_nccl_ids, bst.num_trainers_,
                              bst.trainer_id_);
      VLOG(1) << "flat NCCL context initialization complete";
      return;
    }

    // num_trainers == 1 && places.size() > 1
    if (bst.num_trainers_ == 1) {
      nccl_ctxs_.InitFlatCtxs(places_, flat_nccl_ids, bst.num_trainers_,
                              bst.trainer_id_);
      return;
    }

    for (int i = 0; i < static_cast<int>(bst.nccl_comm_num_); i++) {
      std::string var_name = platform::GetFlatNCCLVarName(i);
      auto nccl_id_var = scope->FindVar(var_name);
      PADDLE_ENFORCE(nccl_id_var, "can't find %s nccl_id_var", var_name);
      auto nccl_id = nccl_id_var->GetMutable<ncclUniqueId>();
      flat_nccl_ids.push_back(nccl_id);
    }

    nccl_ctxs_.InitFlatCtxs(places_, flat_nccl_ids, bst.num_trainers_,
                            bst.trainer_id_);

    if (bst.use_hierarchical_allreduce_) {
      std::vector<ncclUniqueId *> inter_nccl_ids;
      for (int i = 0; i < static_cast<int>(bst.nccl_comm_num_); i++) {
        std::string var_name = platform::GetHierarchicalInterNCCLVarName(i);
        auto nccl_id_var = scope->FindVar(var_name);
        PADDLE_ENFORCE(nccl_id_var, "can't find %s nccl_id_var", var_name);
        auto inter_nccl_id = nccl_id_var->GetMutable<ncclUniqueId>();
        inter_nccl_ids.push_back(inter_nccl_id);
      }

      std::vector<ncclUniqueId *> exter_nccl_ids;
      for (int i = 0; i < static_cast<int>(bst.nccl_comm_num_); i++) {
        std::string var_name = platform::GetHierarchicalExterNCCLVarName(i);
        auto nccl_id_var = scope->FindVar(var_name);
        PADDLE_ENFORCE(nccl_id_var, "can't find %s nccl_id_var", var_name);
        auto nccl_id = nccl_id_var->GetMutable<ncclUniqueId>();
        exter_nccl_ids.push_back(nccl_id);
      }

      nccl_ctxs_.InitHierarchicalCtxs(places_, inter_nccl_ids, exter_nccl_ids,
                                      bst.num_trainers_, bst.trainer_id_,
                                      bst.hierarchical_allreduce_inter_nranks_,
                                      bst.hierarchical_allreduce_exter_nranks_);
    }
  }
#endif

  BuildStrategy build_strategy_;
  std::vector<platform::Place> places_;
  std::vector<Scope *> local_scopes_;
  Scope *global_scope_;  // not owned
  std::unique_ptr<details::SSAGraphExecutor> executor_;

#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32)
  platform::MultiNCCLContextMap nccl_ctxs_;
#endif
  bool own_local_scope_;
  bool use_cuda_;
  bool use_all_reduce_;
  size_t nranks_;

  // global_ref_cnts_ is initialized once when the ParallelExecutor is
  // constructed and is kept unchanged afterwards.
  // Before each iteration, runtime_ref_cnts_ is reset to global_ref_cnts_.
  std::vector<ir::ReferenceCountMap> global_ref_cnts_;
  std::vector<ir::AtomicReferenceCountMap> runtime_ref_cnts_;
  ir::GarbageCollectorMap gcs_;
};

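// Creates one garbage collector per place (when eager deletion is enabled)
// and applies the reference_count_pass and eager_deletion_pass so that
// variables can be freed as soon as their last reader finishes.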
ir::Graph *ParallelExecutorPrivate::PrepareGCAndRefCnts(
    ir::Graph *graph, size_t max_memory_size) {
  for (size_t i = 0; i < places_.size(); ++i) {
    auto &place = places_[i];
    if (gcs_.count(place) > 0) {
      continue;
    }
    std::unique_ptr<GarbageCollector> gc;
#ifdef PADDLE_WITH_CUDA
    if (platform::is_gpu_place(place)) {
      if (IsFastEagerDeletionModeEnabled()) {
        gc.reset(new UnsafeFastGPUGarbageCollector(
            boost::get<platform::CUDAPlace>(place), max_memory_size));
      } else {
        gc.reset(new StreamGarbageCollector(
            boost::get<platform::CUDAPlace>(place), max_memory_size));
      }
      VLOG(10) << "Created " << i << "-th GarbageCollector at " << place;
    } else {
#endif
      if (platform::is_cpu_place(place)) {
        gc.reset(new CPUGarbageCollector(boost::get<platform::CPUPlace>(place),
                                         max_memory_size));
        VLOG(10) << "Created GarbageCollector at " << place;
      } else {
        PADDLE_THROW("Unsupported place for garbage collection");
      }
#ifdef PADDLE_WITH_CUDA
    }
#endif

    gcs_.emplace(place, std::move(gc));
  }

  if (!gcs_.empty()) {
    std::vector<ir::LastLiveOpsOfVars> last_live_ops_of_vars;

    auto ref_cnt_pass =
        ir::PassRegistry::Instance().Get("reference_count_pass");
    ref_cnt_pass->SetNotOwned(ir::kGlobalReferenceCount, &global_ref_cnts_);
    ref_cnt_pass->SetNotOwned(ir::kLastLiveOpsOfVars, &last_live_ops_of_vars);
    graph = ref_cnt_pass->Apply(graph);
    VLOG(10) << "ReferenceCountPass Applied";

    auto eager_deletion_pass =
        ir::PassRegistry::Instance().Get("eager_deletion_pass");
    eager_deletion_pass->SetNotOwned(ir::kRuntimeReferenceCount,
                                     &runtime_ref_cnts_);
    eager_deletion_pass->SetNotOwned(ir::kGarbageCollector, &gcs_);
    eager_deletion_pass->SetNotOwned(ir::kLastLiveOpsOfVars,
                                     &last_live_ops_of_vars);
    eager_deletion_pass->SetNotOwned(ir::kAllPlaces, &places_);
    graph = eager_deletion_pass->Apply(graph);
    VLOG(10) << "EagerDeletionPass Applied";
  }
  return graph;
}

std::vector<Scope *> &ParallelExecutor::GetLocalScopes() {
  return member_->local_scopes_;
}

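// The local execution scopes are owned by the ScopeBufferedSSAGraphExecutor;
// the two helpers below forward to it when that executor type is in use.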
void ParallelExecutor::DropLocalExeScopes() {
  auto executor = dynamic_cast<details::ScopeBufferedSSAGraphExecutor *>(
      member_->executor_.get());
  if (executor) {
    executor->DropLocalExeScopes();
  }
}

bool ParallelExecutor::NeedCreateLocalExeScope() {
  auto executor = dynamic_cast<details::ScopeBufferedSSAGraphExecutor *>(
      member_->executor_.get());
  return executor && executor->NeedCreateLocalExeScope();
}

#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32)
/*
 * When NCCL initializes communicators via ncclCommInitAll, errors occur if
 * the allreduce op handle and sync_batch_norm_op invoke ncclAllReduce in
 * parallel. So a separate NCCL communicator is created for
 * sync_batch_norm_op. This code should be unified under a single NCCL
 * management scheme.
 */
platform::NCCLContextMap *ParallelExecutor::GetNCCLContextForSyncbatchNomrOp(
    framework::Scope *scope) {
  auto *nccl_id_var = scope->FindVar(NCCL_ID_VARNAME);
  if (nccl_id_var != nullptr) {
    return member_->nccl_ctxs_.DefaultFlatCtx();
  }

  if (dev_nccl_ctxs_.get() == nullptr) {
    dev_nccl_ctxs_.reset(new platform::NCCLContextMap(member_->places_));
  }
  return dev_nccl_ctxs_.get();
}
#endif

ParallelExecutor::ParallelExecutor(const std::vector<platform::Place> &places,
                                   const std::vector<std::string> &bcast_vars,
                                   const std::string &loss_var_name,
                                   Scope *scope,
                                   const std::vector<Scope *> &local_scopes,
                                   const ExecutionStrategy &exec_strategy,
                                   const BuildStrategy &build_strategy,
                                   ir::Graph *graph)
    : member_(new ParallelExecutorPrivate(places)) {
  member_->global_scope_ = scope;
  member_->use_cuda_ = exec_strategy.use_cuda_;
  member_->build_strategy_ = build_strategy;
  member_->use_all_reduce_ =
      build_strategy.reduce_ == BuildStrategy::ReduceStrategy::kAllReduce;
  member_->nranks_ = build_strategy.num_trainers_ * places.size();
#if defined(PADDLE_WITH_CUDA) && defined(_WIN32)
  if (member_->use_cuda_) {
    PADDLE_ENFORCE(places.size() == 1, "Windows supports only a single GPU.");
  }
#endif
  if (!member_->use_all_reduce_) {
    PADDLE_ENFORCE(places.size() > 1,
                   "If you set build_strategy.reduce to 'Reduce', "
                   "the number of places must be greater than 1.");
  }

  LOG(WARNING) << string::Sprintf(
      "The number of %s used by the ParallelExecutor is %lu, and the "
      "Program will be copied %lu times.",
      (member_->use_cuda_ ? "CUDAPlace" : "CPUPlace"), places.size(),
      places.size());

  // Step 1. Broadcast the bcast_vars to all devices.
  // Create local scopes
  if (local_scopes.empty()) {
    member_->own_local_scope_ = true;
    member_->local_scopes_.emplace_back(member_->global_scope_);
    for (size_t i = 1; i < member_->places_.size(); ++i) {
      member_->local_scopes_.emplace_back(&scope->NewScope());
    }
  } else {
    member_->own_local_scope_ = false;
    PADDLE_ENFORCE_EQ(member_->places_.size(), local_scopes.size());
    for (size_t i = 0; i < member_->places_.size(); ++i) {
      member_->local_scopes_.emplace_back(&local_scopes[i]->NewScope());
    }
  }

  std::vector<ir::Graph *> graphs;
  if (build_strategy.async_mode_) {
    PADDLE_ENFORCE(!member_->use_cuda_,
                   "GPU mode does not support async_mode_ yet!");
    graphs.push_back(graph);
    for (size_t i = 1; i < places.size(); ++i) {
      auto *tmp_graph = new ir::Graph(graph->OriginProgram());
      async_graphs_.emplace_back(tmp_graph);
      graphs.push_back(tmp_graph);
    }
  }

  // FIXME(Yancey1989): parallel graph mode gets better performance
  // in GPU allreduce distributed training. We need an elegant way to
  // choose the execution strategy.
  build_strategy.enable_parallel_graph_ =
      EnableParallelGraphExecution(*graph, exec_strategy, build_strategy);
  if (build_strategy.enable_parallel_graph_)
    VLOG(0) << "The Executor will execute the graph with ParallelGraph "
               "Execution, which can yield better performance. You can "
               "force it off via the env var FLAGS_enable_parallel_graph=0";

  if (member_->use_cuda_) {
// Broadcast parameters to all GPUs
#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32)
    member_->InitNCCLCtxs(scope, build_strategy);

    // Initialize each device context's nccl comm; it will be used by normal
    // operators like sync_batch_norm and by collective ops.
    // NOTE: if more than one ParallelExecutor runs with the same places, the
    // nccl comm will be overwritten and problems will arise.
    // NOTE: NCCL group-calls and non-group-calls cannot use the same NCCL
    // communicator, so ParallelGraph and multi-process mode re-use the same
    // communicators.
    auto *nccl_ctxs = GetNCCLContextForSyncbatchNomrOp(scope);
    for (size_t dev_id = 0; dev_id < member_->places_.size(); ++dev_id) {
      platform::DeviceContextPool &pool =
          platform::DeviceContextPool::Instance();
      auto *dev_ctx = static_cast<platform::CUDADeviceContext *>(
          pool.Get(member_->places_[dev_id]));
      auto &nccl_ctx = nccl_ctxs->at(member_->places_[dev_id]);
      dev_ctx->set_nccl_comm(nccl_ctx.comm());
    }
#endif
  }
  // broadcast parameters from the 0th device to others:
  auto need_broadcast = [&]() -> bool {
    if (build_strategy.num_trainers_ > 1) {
      // 1. num_trainers will be greater than 1 for nccl distributed training.
      return true;
    } else if (member_->local_scopes_.size() != 1 && local_scopes.empty()) {
      // 2. There is only one trainer process, but the ParallelExecutor holds
      // multiple devices.
      return true;
    }
    return false;
  };

  if (need_broadcast()) {
    BCastParamsToDevices(bcast_vars, build_strategy.trainer_id_);
  }
  // The startup program has been run. All local scopes hold correct parameters.

  // Step 2. Convert main_program to SSA form and dependency graph. Also, insert
  // ncclOp
  std::vector<ir::Graph *> async_graphs(places.size());
#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32)
  if (build_strategy.async_mode_) {
    VLOG(3) << "use local async mode";
    graph = build_strategy.Apply(graph, {member_->places_[0]}, loss_var_name,
                                 {member_->local_scopes_[0]}, 1,
                                 member_->use_cuda_, &member_->nccl_ctxs_);
    for (size_t i = 1; i < member_->places_.size(); ++i) {
      graphs[i] =
          build_strategy.Apply(graphs[i], {member_->places_[i]}, loss_var_name,
                               {member_->local_scopes_[i]}, 1,
                               member_->use_cuda_, &member_->nccl_ctxs_);
      async_graphs[i] = graphs[i];
    }
  } else {
    graph = build_strategy.Apply(graph, member_->places_, loss_var_name,
                                 member_->local_scopes_, member_->nranks_,
                                 member_->use_cuda_, &member_->nccl_ctxs_);
  }
#else
  if (build_strategy.async_mode_) {
    VLOG(3) << "use local async mode";
    graph = build_strategy.Apply(graph, {member_->places_[0]}, loss_var_name,
                                 {member_->local_scopes_[0]}, 1,
                                 member_->use_cuda_);
    for (size_t i = 1; i < member_->places_.size(); ++i) {
      graphs[i] = build_strategy.Apply(
          graphs[i], {member_->places_[i]}, loss_var_name,
          {member_->local_scopes_[i]}, 1, member_->use_cuda_);
      async_graphs[i] = graphs[i];
    }
  } else {
    graph = build_strategy.Apply(graph, member_->places_, loss_var_name,
                                 member_->local_scopes_, member_->nranks_,
                                 member_->use_cuda_);
  }
#endif

  auto max_memory_size = GetEagerDeletionThreshold();
  VLOG(10) << "Eager Deletion Threshold "
           << static_cast<float>(max_memory_size) / (1 << 30);
  if (max_memory_size >= 0) {
    graph = member_->PrepareGCAndRefCnts(graph,
                                         static_cast<size_t>(max_memory_size));
  }

  async_graphs[0] = graph;

  // Step 3. Create vars in each scope. Passes may also create new vars.
  //         skip control vars and empty vars
  std::vector<details::VariableInfo> var_infos;
  for (auto &node : graph->Nodes()) {
    if (node->IsVar() && !node->IsCtrlVar() && node->Var()) {
      var_infos.emplace_back();
      var_infos.back().name_ = node->Var()->Name();
      var_infos.back().type_ = node->Var()->GetType();
      var_infos.back().persistable_ = node->Var()->Persistable();
    }
  }

  // If the loss_var_name is given, the number of graphs should be exactly one.
  if (loss_var_name.size()) {
    size_t graph_num = ir::GraphNum(*graph);
    if (graph_num > 1) {
      LOG(WARNING)
          << "The number of graphs should be exactly one, "
             "but the current graph has "
          << ir::GraphNum(*graph)
          << " sub_graphs. If you want to see the nodes of the "
             "sub_graphs, you should use 'FLAGS_print_sub_graph_dir' "
             "to specify the output dir. NOTE: if you are not doing "
             "training, please don't pass loss_var_name.";
    }
  }

  if (build_strategy.async_mode_) {
    VLOG(3) << "use AsyncSSAGraphExecutor";
    member_->executor_.reset(new details::AsyncSSAGraphExecutor(
        exec_strategy, member_->local_scopes_, member_->places_, async_graphs));
  } else if (build_strategy.enable_parallel_graph_) {
    VLOG(3) << "use ParallelSSAGraphExecutor";
#ifdef PADDLE_WITH_CUDA
    // TODO(Yancey1989): Remove passing in the main_program when
    // allreduce_seq_pass doesn't need it as the attr.
    member_->executor_.reset(new details::ParallelSSAGraphExecutor(
        exec_strategy, member_->local_scopes_, member_->places_, graph));
#else
    PADDLE_THROW(
        "Paddle should be compiled with CUDA for ParallelGraph Execution.");
#endif
  } else {
    if (exec_strategy.type_ == ExecutionStrategy::kDefault) {
      VLOG(3) << "use ThreadedSSAGraphExecutor";
      member_->executor_.reset(new details::ThreadedSSAGraphExecutor(
          exec_strategy, member_->local_scopes_, member_->places_, graph));
    } else {
      VLOG(3) << "use FastThreadedSSAGraphExecutor";
      member_->executor_.reset(new details::FastThreadedSSAGraphExecutor(
          exec_strategy, member_->local_scopes_, member_->places_, graph));
    }
  }

  VLOG(3) << "use ScopeBufferedSSAGraphExecutor";
  if (!build_strategy.async_mode_) {
    member_->executor_.reset(new details::ScopeBufferedSSAGraphExecutor(
        exec_strategy, member_->local_scopes_, std::move(var_infos),
        member_->places_, std::move(member_->executor_)));
  }
}

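// Broadcasts the given variables from device 0 of trainer 0 to every other
// device: via ncclBcast for GPU tensors, and via TensorCopy/ShareDataWith
// for CPU tensors.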
void ParallelExecutor::BCastParamsToDevices(
    const std::vector<std::string> &vars, int trainer_id) const {
  VLOG(3) << "BCastParamsToDevices";
  // In the initializing broadcast, all vars are broadcast from device(0).
  for (auto &var : vars) {
    framework::Variable *main_var = member_->local_scopes_[0]->FindVar(var);
    if (main_var == nullptr || !main_var->IsType<LoDTensor>()) {
      continue;
    }

    auto &main_tensor = main_var->Get<LoDTensor>();
    if (!main_tensor.IsInitialized()) {
      VLOG(3) << "one input var is not initialized, skip!";
      continue;
    }
    auto &dims = main_tensor.dims();
    if (paddle::platform::is_gpu_place(main_tensor.place())) {
#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32)
      std::vector<void *> buffers;
      buffers.reserve(member_->places_.size());
      size_t numel = main_tensor.numel();
      ncclDataType_t data_type = platform::ToNCCLDataType(main_tensor.type());
      for (size_t i = 0; i < member_->places_.size(); ++i) {
        auto place = member_->places_[i];
        void *buffer;

        if (i == 0 && trainer_id == 0) {
          buffer = const_cast<void *>(main_tensor.data<void>());
        } else {
          auto local_scope = member_->local_scopes_[i];
          auto *t = local_scope->Var(var)->GetMutable<LoDTensor>();
          t->Resize(dims);
          buffer = t->mutable_data(place, main_tensor.type());
        }
        buffers.push_back(buffer);
      }

      PADDLE_ENFORCE_EQ(
          member_->places_.size(), buffers.size(),
          "the number of buffers to bcast must equal the number of places");
      {
        auto *nccl_ctxs = member_->nccl_ctxs_.DefaultFlatCtx();
        platform::NCCLGroupGuard guard;
        for (size_t i = 0; i < member_->places_.size(); ++i) {
          auto &nccl_ctx = nccl_ctxs->at(member_->places_[i]);
          platform::dynload::ncclBcast(buffers[i], numel, data_type, 0,
                                       nccl_ctx.comm_, nccl_ctx.stream());
        }
        nccl_ctxs->WaitAll();
      }
#endif
    } else {
      platform::CPUPlace cpu;
      for (size_t i = 1; i < member_->places_.size(); ++i) {
        auto local_scope = member_->local_scopes_[i];
        auto *t = local_scope->Var(var)->GetMutable<LoDTensor>();

        auto copy_memory = [&] {
          t->Resize(dims);
          t->mutable_data(cpu, main_tensor.type());
          paddle::framework::TensorCopy(main_tensor, cpu, t);
        };

        auto share_memory = [&] { t->ShareDataWith(main_tensor); };

        // FIXME(zcd): LR_DECAY_COUNTER should not be shared. This is a hot fix.
        if (member_->build_strategy_.async_mode_) {
          share_memory();
        } else if (member_->use_all_reduce_ || member_->use_cuda_ ||
                   var == "@LR_DECAY_COUNTER@") {
          copy_memory();
598
        } else {
Q
can run  
Qiao Longfei 已提交
599
          share_memory();
        }
      }
    }
  }
}

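// Runs one iteration: resets the runtime reference counts when garbage
// collection is enabled, executes the underlying SSA graph executor, and
// stores the fetch results into fetched_var_name in the global scope.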
void ParallelExecutor::Run(const std::vector<std::string> &fetch_tensors,
                           const std::string &fetched_var_name) {
  VLOG(3) << "enter ParallelExecutor Run";
#ifdef WITH_GPERFTOOLS
  if (gProfileStarted) {
    ProfilerFlush();
  }
#endif

  platform::RecordBlock b(0);
  if (member_->HasGarbageCollectors()) {
    platform::RecordEvent event("PrepareGarbageCollectors");
    member_->ResetRuntimeReferenceCount(fetch_tensors, fetched_var_name);
  }

  VLOG(3) << "ParallelExecutor begin to run member_->executor_->Run";
  auto fetch_data = member_->executor_->Run(fetch_tensors);
  *member_->global_scope_->Var(fetched_var_name)->GetMutable<FeedFetchList>() =
      fetch_data;
}

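// Shares each pre-split tensor directly with its corresponding local scope;
// expects exactly one map of tensors per device.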
void ParallelExecutor::FeedTensorsIntoLocalScopes(
    const std::vector<std::unordered_map<std::string, LoDTensor>> &tensors) {
  PADDLE_ENFORCE_EQ(member_->local_scopes_.size(), tensors.size());

  for (size_t i = 0; i < tensors.size(); ++i) {
    auto &map = tensors[i];
    auto *scope = member_->local_scopes_[i];
    for (auto &pair : map) {
      auto *trg = scope->Var(pair.first)->GetMutable<LoDTensor>();
      trg->ShareDataWith(pair.second);
      trg->set_lod(pair.second.lod());
    }
  }
}

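// Splits each fed LoDTensor across the places and shares one shard with each
// local scope; the batch must yield at least one shard per device.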
void ParallelExecutor::FeedAndSplitTensorIntoLocalScopes(
    const std::unordered_map<std::string, LoDTensor> &tensors) {
  for (auto pair : tensors) {
    auto lod_tensors = pair.second.SplitLoDTensor(member_->places_);
    if (member_->places_.size() != lod_tensors.size()) {
      bool is_cpu_place = platform::is_cpu_place(member_->places_.front());
      auto error_info = string::Sprintf(
          "The number(%d) of samples in the "
          "current batch is less than the count(%d) of "
          "devices(%s), which is currently not allowed. ",
          lod_tensors.size(), member_->places_.size(),
          (is_cpu_place ? "CPU" : "GPU"));
      if (is_cpu_place) {
        error_info +=
            "You should set the environment variable CPU_NUM in the system "
            "to determine the number of devices you need.";
      }
      PADDLE_THROW(error_info);
    }
    for (size_t j = 0; j < member_->places_.size(); ++j) {
      // TODO(panxy0718): Do I need to delete this var?
      auto t =
          member_->local_scopes_[j]->Var(pair.first)->GetMutable<LoDTensor>();
      t->ShareDataWith(lod_tensors[j]);
      t->set_lod(lod_tensors[j].lod());
    }
  }
}

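// Wait for all in-flight device work to finish before tearing down state.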
ParallelExecutor::~ParallelExecutor() {
  for (auto &p : member_->places_) {
    platform::DeviceContextPool::Instance().Get(p)->Wait();
  }
  delete member_;
}

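// Heuristic for enabling ParallelGraph execution: it is turned off by the
// FLAGS_enable_parallel_graph flag, by sparse (SELECTED_ROWS) updates, by
// send/recv (pserver) ops, on Windows, and for some non-allreduce or
// non-CUDA configurations.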
bool ParallelExecutor::EnableParallelGraphExecution(
    const ir::Graph &graph, const ExecutionStrategy &exec_strategy,
    const BuildStrategy &build_strategy) const {
  if (!FLAGS_enable_parallel_graph) {
    return false;
  }

  bool enable_parallel_graph = true;

  for (ir::Node *node : graph.Nodes()) {
    if (node->IsVar() && node->Var()) {
      // TODO(Yancey1989): support sparse update in ParallelGraph mode.
      if (node->Var()->GetType() == proto::VarType::SELECTED_ROWS) {
        enable_parallel_graph = false;
        break;
      }
    } else if (node->IsOp() && node->Op()) {
      // TODO(Yancey1989): support pserver mode
      if (node->Op()->Type() == "send" || node->Op()->Type() == "recv") {
        enable_parallel_graph = false;
        break;
      }
    }
  }

  if (!member_->use_all_reduce_ || !member_->use_cuda_) {
    if (build_strategy.enable_sequential_execution_ ||
        exec_strategy.type_ == ExecutionStrategy::ExecutorType::kExperimental) {
      enable_parallel_graph = false;
    }
  }

#ifdef WIN32
  VLOG(1) << "Windows has no support for parallel graph execution; "
             "enable_parallel_graph is forced to false.";
  enable_parallel_graph = false;
#endif

  return enable_parallel_graph;
}

}  // namespace framework
}  // namespace paddle

USE_PASS(reference_count_pass);
USE_PASS(eager_deletion_pass);