/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/framework/parallel_executor.h"
#include <algorithm>
#include <memory>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
#include "paddle/fluid/framework/details/async_ssa_graph_executor.h"
#include "paddle/fluid/framework/details/fast_threaded_ssa_graph_executor.h"
#include "paddle/fluid/framework/details/multi_devices_helper.h"
#include "paddle/fluid/framework/details/op_handle_base.h"
#include "paddle/fluid/framework/details/parallel_ssa_graph_executor.h"
#include "paddle/fluid/framework/details/scope_buffered_ssa_graph_executor.h"
#include "paddle/fluid/framework/details/threaded_ssa_graph_executor.h"
#include "paddle/fluid/framework/ir/graph.h"
#include "paddle/fluid/framework/ir/graph_helper.h"
#include "paddle/fluid/framework/ir/memory_optimize_pass/memory_optimization_var_info.h"
#include "paddle/fluid/framework/ir/memory_optimize_pass/reference_count_pass_helper.h"
#include "paddle/fluid/platform/profiler.h"

#ifdef WITH_GPERFTOOLS
#include "gperftools/profiler.h"
#endif
DEFINE_string(pe_profile_fname, "",
              "Profiler filename for PE, which is generated by gperftools. "
              "Only valid when compiled with `WITH_PROFILER=ON`. Empty if "
              "disabled.");
DEFINE_bool(enable_parallel_graph, false,
            "Force disable parallel graph execution mode if set to false.");

namespace paddle {
namespace framework {

static std::once_flag gProfileOnce;
#ifdef WITH_GPERFTOOLS
static bool gProfileStarted = false;
#endif

class ParallelExecutorPrivate {
 public:
  explicit ParallelExecutorPrivate(const std::vector<platform::Place> &places)
      : places_(places) {
    if (!FLAGS_pe_profile_fname.empty()) {
      std::call_once(gProfileOnce, [] {
#ifdef WITH_GPERFTOOLS
        ProfilerStart(FLAGS_pe_profile_fname.c_str());
        gProfileStarted = true;
#else
        LOG(WARNING) << "Paddle is not compiled with gperftools. "
                        "FLAGS_pe_profile_fname will be ignored";
#endif
      });
    }
  }

  ~ParallelExecutorPrivate() {
    if (own_local_scope_) {
      for (size_t i = 1; i < local_scopes_.size(); ++i) {
        // Skip the first scope, since it is the global scope.
        Scope *local_scope = local_scopes_[i];
        if (global_scope_->HasKid(local_scope)) {
          global_scope_->DeleteScope(local_scope);
        }
      }
    }
  }

  ir::Graph *ApplyMemoryOptimizePass(ir::Graph *graph);

  inline bool HasGarbageCollectors() const { return !gcs_.empty(); }

#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32)
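  // Initializes the NCCL contexts held by nccl_ctxs_. In short:
  //  - nranks_ == 1: no ncclUniqueId is needed, the flat contexts are
  //    initialized directly;
  //  - parallel-graph mode: a single flat ncclUniqueId is looked up in the
  //    scope (or created on the fly);
  //  - a single trainer with multiple places: flat contexts without ids;
  //  - multiple trainers: the flat ids stored in the scope are read, and when
  //    hierarchical allreduce is enabled the inter/exter ids are read as well.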
  void InitNCCLCtxs(framework::Scope *scope, const BuildStrategy &bst) {
    VLOG(1) << "nccl comm num:" << bst.nccl_comm_num_ << ", nranks:" << nranks_
            << ", num_trainers:" << bst.num_trainers_
            << ", trainer_id:" << bst.trainer_id_;

    if (bst.use_hierarchical_allreduce_) {
      VLOG(1) << ", use_hierarchical_allreduce:"
              << bst.use_hierarchical_allreduce_ << ", inter_trainers_num:"
              << bst.hierarchical_allreduce_inter_nranks_
              << ", exter_trainers_num:"
              << bst.hierarchical_allreduce_exter_nranks_;
    }

    std::vector<ncclUniqueId *> flat_nccl_ids;
    if (nranks_ == 1) {
      // FIXME(gongwb): need not to create ncclid when nranks==1
      nccl_ctxs_->InitFlatCtxs(places_, flat_nccl_ids, bst.num_trainers_,
                               bst.trainer_id_);
      return;
    }

    if (bst.enable_parallel_graph_) {
      VLOG(1) << "use only one ncclid in pg model";

      ncclUniqueId *nccl_id = nullptr;

      std::string var_name = platform::GetFlatNCCLVarName(0);
      auto nccl_id_var = scope->FindVar(var_name);
      if (nccl_id_var) {
        nccl_id = nccl_id_var->GetMutable<ncclUniqueId>();
        VLOG(10) << "find nccl_id_var:" << var_name << ", nccl_id:" << nccl_id;
      } else {
        nccl_id = new ncclUniqueId();
        PADDLE_ENFORCE(platform::dynload::ncclGetUniqueId(nccl_id));
        VLOG(10) << "can't find nccl_id_var:" << var_name
                 << ", nccl_id:" << nccl_id;
      }

      flat_nccl_ids.push_back(nccl_id);

      nccl_ctxs_->InitFlatCtxs(places_, flat_nccl_ids, bst.num_trainers_,
                               bst.trainer_id_);
      VLOG(1) << "init bst nccl context complete!";
      return;
    }

    // num_trainers ==1 && places > 1
    if (bst.num_trainers_ == 1) {
      nccl_ctxs_->InitFlatCtxs(places_, flat_nccl_ids, bst.num_trainers_,
                               bst.trainer_id_);
      return;
    }

    for (int i = 0; i < static_cast<int>(bst.nccl_comm_num_); i++) {
      std::string var_name = platform::GetFlatNCCLVarName(i);
      auto nccl_id_var = scope->FindVar(var_name);
      PADDLE_ENFORCE(nccl_id_var, "can't find %s nccl_id_var", var_name);
      auto nccl_id = nccl_id_var->GetMutable<ncclUniqueId>();
      flat_nccl_ids.push_back(nccl_id);
    }

    nccl_ctxs_->InitFlatCtxs(places_, flat_nccl_ids, bst.num_trainers_,
                             bst.trainer_id_);

    if (bst.use_hierarchical_allreduce_) {
      std::vector<ncclUniqueId *> inter_nccl_ids;
      for (int i = 0; i < static_cast<int>(bst.nccl_comm_num_); i++) {
        std::string var_name = platform::GetHierarchicalInterNCCLVarName(i);
        auto nccl_id_var = scope->FindVar(var_name);
        PADDLE_ENFORCE(nccl_id_var, "can't find %s nccl_id_var", var_name);
        auto inter_nccl_id = nccl_id_var->GetMutable<ncclUniqueId>();
        inter_nccl_ids.push_back(inter_nccl_id);
      }

      std::vector<ncclUniqueId *> exter_nccl_ids;
      for (int i = 0; i < static_cast<int>(bst.nccl_comm_num_); i++) {
        std::string var_name = platform::GetHierarchicalExterNCCLVarName(i);
        auto nccl_id_var = scope->FindVar(var_name);
        PADDLE_ENFORCE(nccl_id_var, "can't find %s nccl_id_var", var_name);
        auto nccl_id = nccl_id_var->GetMutable<ncclUniqueId>();
        exter_nccl_ids.push_back(nccl_id);
      }

      nccl_ctxs_->InitHierarchicalCtxs(
          places_, inter_nccl_ids, exter_nccl_ids, bst.num_trainers_,
          bst.trainer_id_, bst.hierarchical_allreduce_inter_nranks_,
          bst.hierarchical_allreduce_exter_nranks_);
    }
  }

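  // Reuses the NCCLCommunicator already stored in the scope if one exists
  // (e.g. created by a previous ParallelExecutor); otherwise creates it and
  // initializes the NCCL contexts via InitNCCLCtxs.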
  void InitOrGetNCCLCommunicator(framework::Scope *scope, BuildStrategy *bst) {
    const std::string var_name = "NCCLCommunicator";
    auto var = scope->FindVar(var_name);
    if (var != nullptr) {
      PADDLE_ENFORCE(var->IsInitialized(),
                     "if %s exists, it must be initialized", var_name);
      VLOG(1) << "find " << var_name
              << " in scope, so use it and does not recreate!";
      nccl_ctxs_ = var->GetMutable<platform::NCCLCommunicator>();
      return;
    }

    if (bst->use_hierarchical_allreduce_) {
      PADDLE_ENFORCE(bst->num_trainers_ > 1, "num_trainers:%llu < 1",
                     bst->num_trainers_);
      PADDLE_ENFORCE(bst->hierarchical_allreduce_inter_nranks_ > 1,
                     "inter_nranks:%d < 1",
                     bst->hierarchical_allreduce_inter_nranks_);
      PADDLE_ENFORCE(
          (bst->num_trainers_ % bst->hierarchical_allreduce_inter_nranks_ == 0),
          "num_trainers:%llu mod inter_nranks:%d != 0", bst->num_trainers_,
          bst->hierarchical_allreduce_inter_nranks_);

      bst->hierarchical_allreduce_exter_nranks_ =
          bst->num_trainers_ / bst->hierarchical_allreduce_inter_nranks_;
    }

    VLOG(1) << "not find " << var_name << " in scope, so recreate it!";
    nccl_ctxs_ = scope->Var(var_name)->GetMutable<platform::NCCLCommunicator>();
    InitNCCLCtxs(scope, *bst);
  }
#endif

  inline bool IsPersistable(const std::string &name) const {
    auto iter = is_persistable_.find(name);
    return iter != is_persistable_.end() && iter->second;
  }

  BuildStrategy build_strategy_;
  std::vector<platform::Place> places_;
  std::vector<Scope *> local_scopes_;
  std::vector<Scope *> local_exec_scopes_;
  Scope *global_scope_;  // not owned
  std::unique_ptr<details::SSAGraphExecutor> executor_;

  std::unordered_map<std::string, bool> is_persistable_;

#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32)
  platform::NCCLCommunicator *nccl_ctxs_{nullptr};
#endif
  bool own_local_scope_;
  bool use_cuda_;
  bool use_all_reduce_;
  size_t nranks_;

  ir::MemOptVarInfoMapList mem_opt_var_infos_;
  ir::GarbageCollectorMap gcs_;
};

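// Applies the memory-optimization passes to the graph, in this order:
// reference_count_pass (always), buffer_shared_inplace_pass (when
// build_strategy_.enable_inplace_ is set),
// buffer_shared_cross_op_memory_reuse_pass (when memory_optimize is on), and
// eager_deletion_pass when garbage collection is enabled.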
ir::Graph *ParallelExecutorPrivate::ApplyMemoryOptimizePass(ir::Graph *graph) {
  std::vector<ir::LastLiveOpsOfVars> last_live_ops_of_vars;

  auto ref_cnt_pass = ir::PassRegistry::Instance().Get("reference_count_pass");
  ref_cnt_pass->SetNotOwned(ir::kMemOptVarInfoMapList, &mem_opt_var_infos_);
  ref_cnt_pass->SetNotOwned(ir::kLastLiveOpsOfVars, &last_live_ops_of_vars);
  graph = ref_cnt_pass->Apply(graph);
  VLOG(10) << "ReferenceCountPass Applied";

  if (build_strategy_.enable_inplace_) {
    auto inplace_pass =
        ir::PassRegistry::Instance().Get("buffer_shared_inplace_pass");
    inplace_pass->SetNotOwned(ir::kMemOptVarInfoMapList, &mem_opt_var_infos_);
    inplace_pass->SetNotOwned(ir::kLastLiveOpsOfVars, &last_live_ops_of_vars);
    inplace_pass->SetNotOwned(ir::kUseCuda, &use_cuda_);
    VLOG(10) << "Start to apply buffer_shared_inplace_pass";
    graph = inplace_pass->Apply(graph);
    VLOG(10) << "buffer_shared_inplace_pass Applied";
    LOG(INFO) << "Inplace strategy is enabled when "
                 "build_strategy.enable_inplace = True";
  }

  /**
   * NOTE(zengjinle): If BuildStrategy.memory_optimize = None in Python,
   * set BuildStrategy.memory_optimize according to whether gc is enabled.
   * If gc is enabled, BuildStrategy.memory_optimize = False.
   * If gc is disabled, BuildStrategy.memory_optimize = True.
   * This is because gc+memory_optimize is worse than gc only.
   *
   * As an option, users can enable BuildStrategy.memory_optimize forcely
   * by setting True, and disable it forcely by setting False.
   */
  bool is_gc_enabled = (GetEagerDeletionThreshold() >= 0);
  if (!build_strategy_.memory_optimize_) {
    build_strategy_.memory_optimize_ = !is_gc_enabled;
  }

  if (build_strategy_.memory_optimize_.get()) {
    auto cross_op_memory_reuse_pass = ir::PassRegistry::Instance().Get(
        "buffer_shared_cross_op_memory_reuse_pass");
    cross_op_memory_reuse_pass->SetNotOwned(ir::kMemOptVarInfoMapList,
                                            &mem_opt_var_infos_);
    cross_op_memory_reuse_pass->SetNotOwned(ir::kLastLiveOpsOfVars,
                                            &last_live_ops_of_vars);
    cross_op_memory_reuse_pass->SetNotOwned(ir::kUseCuda, &use_cuda_);
    VLOG(10) << "Start to apply buffer_shared_cross_op_memory_reuse_pass";
    graph = cross_op_memory_reuse_pass->Apply(graph);
    VLOG(10) << "buffer_shared_cross_op_memory_reuse_pass Applied";
    LOG(INFO) << "Cross op memory reuse strategy is enabled when "
                 "build_strategy.memory_optimize = True or garbage collection "
                 "strategy is disabled, which is not recommended";
  }

  if (!is_gc_enabled) {
    return graph;
  }
  size_t max_memory_size = static_cast<size_t>(GetEagerDeletionThreshold());

  for (size_t i = 0; i < places_.size(); ++i) {
    auto &place = places_[i];
    if (gcs_.count(place) > 0) {
      continue;
    }
    std::unique_ptr<GarbageCollector> gc;
#ifdef PADDLE_WITH_CUDA
    if (platform::is_gpu_place(place)) {
      if (IsFastEagerDeletionModeEnabled()) {
        gc.reset(new UnsafeFastGPUGarbageCollector(
            boost::get<platform::CUDAPlace>(place), max_memory_size));
      } else {
        gc.reset(new StreamGarbageCollector(
            boost::get<platform::CUDAPlace>(place), max_memory_size));
      }
      VLOG(10) << "Created " << i << "-th GarbageCollector at " << place;
    } else {
#endif
      if (platform::is_cpu_place(place)) {
        gc.reset(new CPUGarbageCollector(boost::get<platform::CPUPlace>(place),
                                         max_memory_size));
        VLOG(10) << "Created GarbageCollector at " << place;
      } else {
        PADDLE_THROW("Unsupported place for garbage collection");
      }
#ifdef PADDLE_WITH_CUDA
    }
#endif

    gcs_.emplace(place, std::move(gc));
  }

  if (!gcs_.empty()) {
    auto eager_deletion_pass =
        ir::PassRegistry::Instance().Get("eager_deletion_pass");
    eager_deletion_pass->SetNotOwned(ir::kMemOptVarInfoMapList,
                                     &mem_opt_var_infos_);
    eager_deletion_pass->SetNotOwned(ir::kGarbageCollector, &gcs_);
    eager_deletion_pass->SetNotOwned(ir::kLastLiveOpsOfVars,
                                     &last_live_ops_of_vars);
    eager_deletion_pass->SetNotOwned(ir::kAllPlaces, &places_);
    graph = eager_deletion_pass->Apply(graph);
    VLOG(10) << "EagerDeletionPass Applied";
    LOG(INFO) << "Garbage collection strategy is enabled when "
              << "FLAGS_eager_delete_tensor_gb = "
              << (static_cast<double>(GetEagerDeletionThreshold()) / (1 << 30));
  }
  return graph;
}

std::vector<Scope *> &ParallelExecutor::GetLocalScopes() {
  return member_->local_scopes_;
}

void ParallelExecutor::DropLocalExeScopes() {
  auto executor = dynamic_cast<details::ScopeBufferedSSAGraphExecutor *>(
      member_->executor_.get());
  if (executor) {
    executor->DropLocalExeScopes();
  }
}

bool ParallelExecutor::NeedCreateLocalExeScope() {
  auto executor = dynamic_cast<details::ScopeBufferedSSAGraphExecutor *>(
      member_->executor_.get());
  return executor && executor->NeedCreateLocalExeScope();
}

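// A rough usage sketch, for orientation only (the real call sites live in the
// executor/compiled-program bindings, and the argument values below are
// placeholders):
//
//   ParallelExecutor pe(places, bcast_vars, loss_var_name, scope,
//                       local_scopes, exec_strategy, build_strategy, graph);
//   FeedFetchList fetched = pe.Run({"loss"});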
ParallelExecutor::ParallelExecutor(const std::vector<platform::Place> &places,
                                   const std::vector<std::string> &bcast_vars,
                                   const std::string &loss_var_name,
                                   Scope *scope,
                                   const std::vector<Scope *> &local_scopes,
                                   const ExecutionStrategy &exec_strategy,
                                   const BuildStrategy &build_strategy,
                                   ir::Graph *graph)
    : member_(new ParallelExecutorPrivate(places)) {
  member_->global_scope_ = scope;
  member_->use_cuda_ = exec_strategy.use_cuda_;
  member_->build_strategy_ = build_strategy;
  member_->use_all_reduce_ = member_->build_strategy_.reduce_ ==
                             BuildStrategy::ReduceStrategy::kAllReduce;
  member_->nranks_ = build_strategy.num_trainers_ * places.size();
  if (!member_->use_all_reduce_ && member_->nranks_ == 1) {
    LOG(INFO) << "If you set build_strategy.reduce to 'Reduce', "
                 "the number of places should be greater than 1.";
    member_->build_strategy_.reduce_ =
        BuildStrategy::ReduceStrategy::kAllReduce;
    member_->use_all_reduce_ = true;
  }
#if defined(PADDLE_WITH_CUDA) && defined(_WIN32)
  if (member_->use_cuda_) {
    PADDLE_ENFORCE(places.size() == 1, "Windows can support Single GPU only.");
  }
#endif

  LOG(INFO) << string::Sprintf(
      "The number of %s, which is used in ParallelExecutor, is %lu. And "
      "the Program will be copied %lu times.",
      (member_->use_cuda_ ? "CUDAPlace" : "CPUPlace"), places.size(),
      places.size());

  // Step 1. Bcast the bcast_vars to devs.
  // Create local scopes
  if (local_scopes.empty()) {
    member_->own_local_scope_ = true;
    member_->local_scopes_.emplace_back(member_->global_scope_);
    for (size_t i = 1; i < member_->places_.size(); ++i) {
      member_->local_scopes_.emplace_back(&scope->NewScope());
    }
  } else {
    member_->own_local_scope_ = false;
    PADDLE_ENFORCE_EQ(member_->places_.size(), local_scopes.size());
    for (size_t i = 0; i < member_->places_.size(); ++i) {
      member_->local_scopes_.emplace_back(&local_scopes[i]->NewScope());
    }
  }

  std::vector<ir::Graph *> graphs;
  if (member_->build_strategy_.async_mode_) {
    PADDLE_ENFORCE(!member_->use_cuda_,
                   "gpu mode does not support async_mode_ now!");
    graphs.push_back(graph);
    for (size_t i = 1; i < places.size(); ++i) {
      auto *tmp_graph = new ir::Graph(graph->OriginProgram());
      async_graphs_.emplace_back(tmp_graph);
      graphs.push_back(tmp_graph);
    }
  }

  // FIXME(Yancey1989): parallel graph mode gets better performance
  // in GPU allreduce distributed training. Need an elegant way to
  // choose the execution strategy.
  member_->build_strategy_.enable_parallel_graph_ =
      EnableParallelGraphExecution(*graph, exec_strategy,
                                   member_->build_strategy_);
  if (member_->build_strategy_.enable_parallel_graph_) {
    LOG(INFO) << "The Executor would execute the graph by ParallelGraph "
                 "Execution, which can get better performance; "
              << "you can force it off by setting the env variable "
                 "FLAGS_enable_parallel_graph=0";
  }

  if (member_->use_cuda_ && member_->nranks_ > 1) {
#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32)
    member_->InitOrGetNCCLCommunicator(scope, &member_->build_strategy_);

    // Initialize each device context's nccl comm; it will be used by normal
    // Operators like sync_batch_norm and by collective ops.
    // NOTE: if more than one ParallelExecutor runs with the same place, the
    // nccl comm will be rewritten and problems will follow.
    // NOTE: NCCL group-calls and non-group-calls cannot use the same
    // NCCL communicator, so for ParallelGraph and Multi-Process mode, re-use
    // the same communicators.
    auto *nccl_ctxs =
        member_->nccl_ctxs_->GetSyncBatchNormCtx(scope, member_->places_);
    auto &pool = platform::DeviceContextPool::Instance();
    for (size_t dev_id = 0; dev_id < member_->places_.size(); ++dev_id) {
      auto *dev_ctx = static_cast<platform::CUDADeviceContext *>(
          pool.Get(member_->places_[dev_id]));
      auto &nccl_ctx = nccl_ctxs->at(member_->places_[dev_id]);
      dev_ctx->set_nccl_comm(nccl_ctx.comm());
    }
#endif
  }
  // broadcast parameters from the 0th device to others:
  auto need_broadcast = [&]() -> bool {
    if (member_->build_strategy_.num_trainers_ > 1) {
      // 1. num_trainers would be greater than 1 for nccl distributed training.
      return true;
    } else if (member_->local_scopes_.size() != 1 && local_scopes.empty()) {
      // 2. Only one trainer process, but ParallelExecutor holds multiple
      // devices.
      return true;
    }
    return false;
  };
  // Bcast Parameters to all GPUs
  if (need_broadcast()) {
    BCastParamsToDevices(bcast_vars, member_->build_strategy_.trainer_id_);
  }

  // Startup Program has been run. All local scopes have correct parameters.

  // Step 2. Convert main_program to SSA form and dependency graph. Also, insert
  // ncclOp
  std::vector<ir::Graph *> async_graphs(places.size());
#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32)
  if (member_->build_strategy_.async_mode_) {
    VLOG(3) << "use local async mode";
    graph = member_->build_strategy_.Apply(
        graph, {member_->places_[0]}, loss_var_name,
        {member_->local_scopes_[0]}, 1, member_->use_cuda_,
        member_->nccl_ctxs_);
    for (size_t i = 1; i < member_->places_.size(); ++i) {
      graphs[i] = member_->build_strategy_.Apply(
          graphs[i], {member_->places_[i]}, loss_var_name,
          {member_->local_scopes_[i]}, 1, member_->use_cuda_,
          member_->nccl_ctxs_);
      async_graphs[i] = graphs[i];
    }
  } else {
    graph = member_->build_strategy_.Apply(
        graph, member_->places_, loss_var_name, member_->local_scopes_,
        member_->nranks_, member_->use_cuda_, member_->nccl_ctxs_);
  }
#else
  if (member_->build_strategy_.async_mode_) {
    VLOG(3) << "use local async mode";
    graph = member_->build_strategy_.Apply(
        graph, {member_->places_[0]}, loss_var_name,
        {member_->local_scopes_[0]}, 1, member_->use_cuda_);
    for (size_t i = 1; i < member_->places_.size(); ++i) {
      graphs[i] = member_->build_strategy_.Apply(
          graphs[i], {member_->places_[i]}, loss_var_name,
          {member_->local_scopes_[i]}, 1, member_->use_cuda_);
      async_graphs[i] = graphs[i];
    }
  } else {
    graph = member_->build_strategy_.Apply(
        graph, member_->places_, loss_var_name, member_->local_scopes_,
        member_->nranks_, member_->use_cuda_);
  }
#endif

  graph = member_->ApplyMemoryOptimizePass(graph);

  async_graphs[0] = graph;

  // Step 3. Create vars in each scope. Passes may also create new vars.
  //         skip control vars and empty vars
  std::vector<details::VariableInfo> var_infos;
  for (auto &node : graph->Nodes()) {
    if (node->IsVar() && !node->IsCtrlVar() && node->Var()) {
      var_infos.emplace_back();
      var_infos.back().name_ = node->Var()->Name();
      var_infos.back().type_ = node->Var()->GetType();
      var_infos.back().persistable_ = node->Var()->Persistable();

      member_->is_persistable_.emplace(node->Var()->Name(),
                                       node->Var()->Persistable());
    }
  }

  // If the loss_var_name is given, the number of graphs should be only one.
  if (loss_var_name.size()) {
    size_t graph_num = ir::GraphNum(*graph);
    if (graph_num > 1) {
      LOG(WARNING)
          << "The number of graphs should be only one, "
             "but the current graph has "
          << ir::GraphNum(*graph)
          << " sub_graphs. If you want to see the nodes of the "
             "sub_graphs, you should use 'FLAGS_print_sub_graph_dir' "
             "to specify the output dir. NOTE: if you are not doing training, "
             "please don't pass loss_var_name.";
    }
  }

  std::unordered_map<Scope *, Scope *> scope_map;
  for (auto *scope : member_->local_scopes_) {
    auto &local_exec_scope = scope->NewScope();
    member_->local_exec_scopes_.emplace_back(&local_exec_scope);
    scope_map.emplace(scope, &local_exec_scope);
  }

  PADDLE_ENFORCE_EQ(member_->local_scopes_.size(),
                    member_->local_exec_scopes_.size());

  std::vector<ir::Graph *> final_graphs;

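  // Pick the underlying SSA graph executor:
  //   async_mode            -> AsyncSSAGraphExecutor over the per-place graphs
  //   enable_parallel_graph -> ParallelSSAGraphExecutor (CUDA only)
  //   otherwise             -> ThreadedSSAGraphExecutor or
  //                            FastThreadedSSAGraphExecutor, depending on
  //                            exec_strategy.type_
  // Except in async mode, the chosen executor is then wrapped in a
  // ScopeBufferedSSAGraphExecutor.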
  if (member_->build_strategy_.async_mode_) {
    VLOG(3) << "use AsyncSSAGraphExecutor";
    member_->executor_.reset(new details::AsyncSSAGraphExecutor(
        exec_strategy, member_->local_scopes_, member_->local_exec_scopes_,
        member_->places_, async_graphs));
    final_graphs = async_graphs;
  } else if (member_->build_strategy_.enable_parallel_graph_) {
    VLOG(3) << "use ParallelSSAGraphExecutor";
#ifdef PADDLE_WITH_CUDA
    // TODO(Yancey1989): Remove passing in the main_program when
    // allreduce_seq_pass doesn't need it as the attr.
    auto *pg_exe = new details::ParallelSSAGraphExecutor(
        exec_strategy, member_->local_scopes_, member_->local_exec_scopes_,
        member_->places_, graph);
    final_graphs = pg_exe->Graphs();
    member_->executor_.reset(pg_exe);
#else
    PADDLE_THROW(
        "Paddle should be compiled with CUDA for ParallelGraph Execution.");
#endif
  } else {
    if (exec_strategy.type_ == ExecutionStrategy::kDefault) {
      VLOG(3) << "use ThreadedSSAGraphExecutor";
      member_->executor_.reset(new details::ThreadedSSAGraphExecutor(
          exec_strategy, member_->local_scopes_, member_->local_exec_scopes_,
          member_->places_, graph));
    } else {
      VLOG(3) << "use FastThreadedSSAGraphExecutor";
      member_->executor_.reset(new details::FastThreadedSSAGraphExecutor(
          exec_strategy, member_->local_scopes_, member_->local_exec_scopes_,
          member_->places_, graph));
    }
    final_graphs.emplace_back(graph);
  }

  VLOG(3) << "use ScopeBufferedSSAGraphExecutor";
  if (!member_->build_strategy_.async_mode_) {
    member_->executor_.reset(new details::ScopeBufferedSSAGraphExecutor(
        exec_strategy, member_->local_scopes_, member_->local_exec_scopes_,
        std::move(var_infos), member_->places_, std::move(member_->executor_)));
  }

  for (auto *g : final_graphs) {
    auto ops = ir::FilterByNodeWrapper<details::OpHandleBase>(*g);
    for (auto *op : ops) {
      op->SetLocalExecScopes(scope_map);
    }
  }
}

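// Broadcasts the given variables from device(0) (and trainer 0) to every
// other local scope: via ncclBcast for GPU tensors, and via TensorCopy or
// ShareDataWith for CPU tensors.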
void ParallelExecutor::BCastParamsToDevices(
    const std::vector<std::string> &vars, int trainer_id) const {
  VLOG(3) << "BCastParamsToDevices";
  // In the initializing bcast, all vars are bcast from device(0).
  for (auto &var : vars) {
    framework::Variable *main_var = member_->local_scopes_[0]->FindVar(var);
    if (main_var == nullptr || !main_var->IsType<LoDTensor>()) {
      continue;
    }

    auto &main_tensor = main_var->Get<LoDTensor>();
    if (!main_tensor.IsInitialized()) {
      VLOG(3) << "one input var is not initialized, skip it!";
      continue;
    }
    auto &dims = main_tensor.dims();
    if (paddle::platform::is_gpu_place(main_tensor.place())) {
#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32)
      std::vector<void *> buffers;
      buffers.reserve(member_->places_.size());
      size_t numel = main_tensor.numel();
      ncclDataType_t data_type = platform::ToNCCLDataType(main_tensor.type());
      for (size_t i = 0; i < member_->places_.size(); ++i) {
        auto place = member_->places_[i];
        void *buffer;

        if (i == 0 && trainer_id == 0) {
          buffer = const_cast<void *>(main_tensor.data<void>());
        } else {
          auto local_scope = member_->local_scopes_[i];
          auto *t = local_scope->Var(var)->GetMutable<LoDTensor>();
          t->Resize(dims);
          buffer = t->mutable_data(place, main_tensor.type());
        }
        buffers.push_back(buffer);
      }

      PADDLE_ENFORCE_EQ(member_->places_.size(), buffers.size(),
                        "variables' buffer size to bcast is NOT equal to places");
      {
        auto *nccl_ctxs = member_->nccl_ctxs_->DefaultFlatCtx();
        platform::NCCLGroupGuard guard;
        for (size_t i = 0; i < member_->places_.size(); ++i) {
          auto &nccl_ctx = nccl_ctxs->at(member_->places_[i]);
          platform::dynload::ncclBcast(buffers[i], numel, data_type, 0,
                                       nccl_ctx.comm_, nccl_ctx.stream());
        }
        nccl_ctxs->WaitAll();
      }
#endif
    } else {
      platform::CPUPlace cpu;
      for (size_t i = 1; i < member_->places_.size(); ++i) {
        auto local_scope = member_->local_scopes_[i];
        auto *t = local_scope->Var(var)->GetMutable<LoDTensor>();

        auto copy_memory = [&] {
          t->Resize(dims);
          t->mutable_data(cpu, main_tensor.type());
          paddle::framework::TensorCopy(main_tensor, cpu, t);
        };

        auto share_memory = [&] { t->ShareDataWith(main_tensor); };

        // FIXME(zcd): LR_DECAY_COUNTER should not be shared. This is a hot fix.
        if (member_->build_strategy_.async_mode_) {
          share_memory();
        } else if (member_->use_all_reduce_ || member_->use_cuda_ ||
                   var == "@LR_DECAY_COUNTER@") {
          copy_memory();
        } else {
          share_memory();
        }
      }
    }
  }
}

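// Runs one iteration: flushes the gperftools profile if enabled, guards the
// fetched variables against memory-optimization reuse, and delegates to the
// underlying SSA graph executor.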
FeedFetchList ParallelExecutor::Run(
    const std::vector<std::string> &fetch_tensors) {
  VLOG(3) << "enter ParallelExecutor Run";
#ifdef WITH_GPERFTOOLS
  if (gProfileStarted) {
    ProfilerFlush();
  }
#endif

  platform::RecordBlock b(0);

  ir::SkipMemOptVarsGuard guard(&(member_->mem_opt_var_infos_), fetch_tensors,
                                member_->HasGarbageCollectors());

  VLOG(3) << "ParallelExecutor begin to run member_->executor_->Run";
  auto fetch_data = member_->executor_->Run(fetch_tensors);
  return fetch_data;
}

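// Feeds one tensor map per place; persistable variables go into the
// long-lived local scopes, the rest into the per-run local exec scopes.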
void ParallelExecutor::FeedTensorsIntoLocalScopes(
    const std::vector<std::unordered_map<std::string, LoDTensor>> &tensors) {
  PADDLE_ENFORCE_EQ(member_->local_scopes_.size(), tensors.size());

  for (size_t i = 0; i < tensors.size(); ++i) {
    auto &map = tensors[i];
    for (auto &pair : map) {
      bool is_persistable = member_->IsPersistable(pair.first);
      auto *feed_scope = is_persistable ? member_->local_scopes_[i]
                                        : member_->local_exec_scopes_[i];
      auto *feed_var = feed_scope->Var(pair.first);

      auto *trg = feed_var->GetMutable<LoDTensor>();
      trg->ShareDataWith(pair.second);
      trg->set_lod(pair.second.lod());
    }
  }
}

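// Splits each fed LoDTensor across the places; non-persistable inputs must
// provide one slice per place, while a persistable input may be fed as a
// single sample that is then copied to every place.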
void ParallelExecutor::FeedAndSplitTensorIntoLocalScopes(
    const std::unordered_map<std::string, LoDTensor> &tensors) {
  size_t num_places = member_->places_.size();
  for (auto &pair : tensors) {
    bool is_persistable = member_->IsPersistable(pair.first);
    VLOG(3) << "Split " << (is_persistable ? "persistable" : "non-persistable")
            << " data (" << pair.first << "), dim:" << pair.second.dims()
            << ", place: " << pair.second.place();
    auto lod_tensors = pair.second.SplitLoDTensor(member_->places_);
    bool is_cpu_place = platform::is_cpu_place(member_->places_.front());
    if (!is_persistable && num_places != lod_tensors.size()) {
      auto error_info = string::Sprintf(
          "The number (%d) of samples [%s] in the current batch is less than "
          "the count (%d) of devices (%s); currently, this is not allowed. ",
          lod_tensors.size(), pair.first, num_places,
          (is_cpu_place ? "CPU" : "GPU"));
      if (is_cpu_place) {
        error_info +=
            "You should set the environment variable CPU_NUM in the system "
            "to determine the number of devices you need.";
      }
      PADDLE_THROW(error_info);
    } else if (is_persistable) {
      if (lod_tensors.size() == 1) {
        lod_tensors.reserve(num_places);
        auto &tensor = lod_tensors.front();
        PADDLE_ENFORCE_EQ(tensor.dims(), pair.second.dims(),
                          "The dim doesn't match.");
        PADDLE_ENFORCE_EQ(tensor.place(), member_->places_.at(0),
                          "The place doesn't match.");
        for (size_t i = 1; i < num_places; ++i) {
          lod_tensors.emplace_back();
          auto &tmp = lod_tensors.back();
          framework::TensorCopy(pair.second, member_->places_.at(i), &tmp);
        }
      }
      if (lod_tensors.size() != num_places) {
        auto error_info = string::Sprintf(
            "The number(%d) of samples[%s] of the current batch does not match "
            "the count(%d) of devices(%s). Because that %s is a persistable "
            "variable, you can feed just one sample, in that case, the input "
            "sample will be copied in %d copies and be sent to different "
            "places separately. If you need that different place has different "
            "value, you should feed %d samples.",
            lod_tensors.size(), pair.first, num_places,
            (is_cpu_place ? "CPU" : "GPU"), pair.first, num_places, num_places);
        PADDLE_THROW(error_info);
      }
    }

    for (size_t j = 0; j < num_places; ++j) {
      auto *feed_scope = is_persistable ? member_->local_scopes_[j]
                                        : member_->local_exec_scopes_[j];
      auto *feed_var = feed_scope->Var(pair.first);

      auto t = feed_var->GetMutable<LoDTensor>();
      t->ShareDataWith(lod_tensors[j]);
      t->set_lod(lod_tensors[j].lod());
    }
  }
}

ParallelExecutor::~ParallelExecutor() {
  for (auto &p : member_->places_) {
    platform::DeviceContextPool::Instance().Get(p)->Wait();
  }
  delete member_;
}

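// Heuristic for choosing parallel graph execution: requires
// FLAGS_enable_parallel_graph, a graph without SELECTED_ROWS vars and without
// send/recv ops; when not running all-reduce on CUDA it is additionally
// disabled for sequential execution or the experimental executor type, and it
// is always disabled on Windows.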
bool ParallelExecutor::EnableParallelGraphExecution(
    const ir::Graph &graph, const ExecutionStrategy &exec_strategy,
    const BuildStrategy &build_strategy) const {
  if (!FLAGS_enable_parallel_graph) {
    return false;
  }

  bool enable_parallel_graph = true;

  for (ir::Node *node : graph.Nodes()) {
    if (node->IsVar() && node->Var()) {
      // TODO(Yancey1989): support sparse update in ParallelGraph mode.
      if (node->Var()->GetType() == proto::VarType::SELECTED_ROWS) {
        enable_parallel_graph = false;
        break;
      }
    } else if (node->IsOp() && node->Op()) {
      // TODO(Yancey1989): support pserver mode
      if (node->Op()->Type() == "send" || node->Op()->Type() == "recv") {
        enable_parallel_graph = false;
        break;
      }
    }
  }

  if (!member_->use_all_reduce_ || !member_->use_cuda_) {
    if (build_strategy.enable_sequential_execution_ ||
        exec_strategy.type_ == ExecutionStrategy::ExecutorType::kExperimental) {
      enable_parallel_graph = false;
    }
  }

#ifdef WIN32
  VLOG(1) << "Windows has no support for parallel graph; "
             "enable_parallel_graph will be forced to false.";
  enable_parallel_graph = false;
#endif

  return enable_parallel_graph;
}

}  // namespace framework
}  // namespace paddle

USE_PASS(reference_count_pass);
USE_PASS(eager_deletion_pass);
USE_PASS(buffer_shared_inplace_pass);
USE_PASS(buffer_shared_cross_op_memory_reuse_pass);