/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/framework/parallel_executor.h"
#include <algorithm>
#include <memory>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
#include "paddle/fluid/framework/details/async_ssa_graph_executor.h"
#include "paddle/fluid/framework/details/fast_threaded_ssa_graph_executor.h"
#include "paddle/fluid/framework/details/multi_devices_helper.h"
#include "paddle/fluid/framework/details/op_handle_base.h"
#include "paddle/fluid/framework/details/parallel_ssa_graph_executor.h"
#include "paddle/fluid/framework/details/scope_buffered_ssa_graph_executor.h"
#include "paddle/fluid/framework/details/threaded_ssa_graph_executor.h"
#include "paddle/fluid/framework/ir/graph.h"
#include "paddle/fluid/framework/ir/graph_helper.h"
#include "paddle/fluid/framework/ir/memory_optimize_pass/memory_optimization_var_info.h"
#include "paddle/fluid/framework/ir/memory_optimize_pass/reference_count_pass_helper.h"
#include "paddle/fluid/platform/profiler.h"

DECLARE_bool(use_ngraph);

#ifdef WITH_GPERFTOOLS
#include "gperftools/profiler.h"
#endif
DEFINE_string(pe_profile_fname, "",
              "Profiler filename for PE, which is generated by gperftools. "
              "Only valid when compiled with `WITH_PROFILER=ON`. Empty means "
              "disabled.");
DEFINE_bool(enable_parallel_graph, false,
            "Force-disable parallel graph execution mode when set to false.");
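// Illustrative flag usage (a hypothetical invocation; the script name and
// values are made up):
//   FLAGS_pe_profile_fname=/tmp/pe.prof FLAGS_enable_parallel_graph=1 \
//       python train.py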

namespace paddle {
namespace framework {

static std::once_flag gProfileOnce;
#ifdef WITH_GPERFTOOLS
static bool gProfileStarted = false;
#endif

class ParallelExecutorPrivate {
 public:
  explicit ParallelExecutorPrivate(const std::vector<platform::Place> &places)
      : places_(places) {
    if (!FLAGS_pe_profile_fname.empty()) {
      std::call_once(gProfileOnce, [] {
#ifdef WITH_GPERFTOOLS
        ProfilerStart(FLAGS_pe_profile_fname.c_str());
        gProfileStarted = true;
#else
        LOG(WARNING) << "Paddle is not compiled with gperftools. "
          "FLAGS_pe_profile_fname will be ignored";
#endif
      });
    }
  }

  ~ParallelExecutorPrivate() {
    if (own_local_scope_) {
      for (size_t i = 1; i < local_scopes_.size(); ++i) {
        // Skip the first scope, since it is the global scope.
        Scope *local_scope = local_scopes_[i];
        if (global_scope_->HasKid(local_scope)) {
          global_scope_->DeleteScope(local_scope);
        }
      }
    }
  }

  ir::Graph *ApplyMemoryOptimizePass(ir::Graph *graph);

  inline bool HasGarbageCollectors() const { return !gcs_.empty(); }

#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32)
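  // Sets up the NCCL contexts used for multi-device / multi-trainer
  // allreduce: flat contexts (one ring over all ranks) are always created,
  // and hierarchical inter/exter contexts are added when hierarchical
  // allreduce is enabled.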
  void InitNCCLCtxs(framework::Scope *scope, const BuildStrategy &bst) {
    VLOG(1) << "nccl comm num:" << bst.nccl_comm_num_ << ", nranks:" << nranks_
            << ", num_trainers:" << bst.num_trainers_
            << ", trainer_id:" << bst.trainer_id_;

    if (bst.use_hierarchical_allreduce_) {
      VLOG(1) << ", use_hierarchical_allreduce:"
              << bst.use_hierarchical_allreduce_ << ", inter_trainers_num:"
              << bst.hierarchical_allreduce_inter_nranks_
              << ", exter_trainers_num:"
              << bst.hierarchical_allreduce_exter_nranks_;
    }

    std::vector<ncclUniqueId *> flat_nccl_ids;
    if (nranks_ == 1) {
      // FIXME(gongwb): no need to create an ncclUniqueId when nranks == 1.
      nccl_ctxs_->InitFlatCtxs(places_, flat_nccl_ids, bst.num_trainers_,
                               bst.trainer_id_);
      return;
    }

    if (bst.enable_parallel_graph_) {
      VLOG(1) << "use only one ncclid in pg model";

      ncclUniqueId *nccl_id = nullptr;

      std::string var_name = platform::GetFlatNCCLVarName(0);
      auto nccl_id_var = scope->FindVar(var_name);
      if (nccl_id_var) {
        nccl_id = nccl_id_var->GetMutable<ncclUniqueId>();
        VLOG(10) << "find nccl_id_var:" << var_name << ", nccl_id:" << nccl_id;
      } else {
        nccl_id = new ncclUniqueId();
        PADDLE_ENFORCE(platform::dynload::ncclGetUniqueId(nccl_id));
        VLOG(10) << "can't find nccl_id_var:" << var_name
                 << ", nccl_id:" << nccl_id;
      }

      flat_nccl_ids.push_back(nccl_id);

      nccl_ctxs_->InitFlatCtxs(places_, flat_nccl_ids, bst.num_trainers_,
                               bst.trainer_id_);
      VLOG(1) << "init bst nccl context complete!";
      return;
    }

    // num_trainers == 1 && places > 1
    if (bst.num_trainers_ == 1) {
      nccl_ctxs_->InitFlatCtxs(places_, flat_nccl_ids, bst.num_trainers_,
                               bst.trainer_id_);
      return;
    }

    for (int i = 0; i < static_cast<int>(bst.nccl_comm_num_); i++) {
      std::string var_name = platform::GetFlatNCCLVarName(i);
      auto nccl_id_var = scope->FindVar(var_name);
      PADDLE_ENFORCE(nccl_id_var, "can't find %s nccl_id_var", var_name);
      auto nccl_id = nccl_id_var->GetMutable<ncclUniqueId>();
      flat_nccl_ids.push_back(nccl_id);
    }

    nccl_ctxs_->InitFlatCtxs(places_, flat_nccl_ids, bst.num_trainers_,
                             bst.trainer_id_);

    if (bst.use_hierarchical_allreduce_) {
      std::vector<ncclUniqueId *> inter_nccl_ids;
      for (int i = 0; i < static_cast<int>(bst.nccl_comm_num_); i++) {
        std::string var_name = platform::GetHierarchicalInterNCCLVarName(i);
        auto nccl_id_var = scope->FindVar(var_name);
        PADDLE_ENFORCE(nccl_id_var, "can't find %s nccl_id_var", var_name);
        auto inter_nccl_id = nccl_id_var->GetMutable<ncclUniqueId>();
        inter_nccl_ids.push_back(inter_nccl_id);
      }

      std::vector<ncclUniqueId *> exter_nccl_ids;
      for (int i = 0; i < static_cast<int>(bst.nccl_comm_num_); i++) {
        std::string var_name = platform::GetHierarchicalExterNCCLVarName(i);
        auto nccl_id_var = scope->FindVar(var_name);
        PADDLE_ENFORCE(nccl_id_var, "can't find %s nccl_id_var", var_name);
        auto nccl_id = nccl_id_var->GetMutable<ncclUniqueId>();
        exter_nccl_ids.push_back(nccl_id);
      }

      nccl_ctxs_->InitHierarchicalCtxs(
          places_, inter_nccl_ids, exter_nccl_ids, bst.num_trainers_,
          bst.trainer_id_, bst.hierarchical_allreduce_inter_nranks_,
          bst.hierarchical_allreduce_exter_nranks_);
    }
  }

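  // Reuses the NCCLCommunicator already stored in the scope (e.g. created by
  // an earlier ParallelExecutor) or creates and initializes a new one.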
  void InitOrGetNCCLCommunicator(framework::Scope *scope, BuildStrategy *bst) {
    const std::string var_name = "NCCLCommunicator";
    auto var = scope->FindVar(var_name);
    if (var != nullptr) {
      PADDLE_ENFORCE(var->IsInitialized(),
                     "if %s exists, it must be initialized", var_name);
      VLOG(1) << "find " << var_name
              << " in scope, so use it and does not recreate!";
      nccl_ctxs_ = var->GetMutable<platform::NCCLCommunicator>();
      return;
    }

    if (bst->use_hierarchical_allreduce_) {
      PADDLE_ENFORCE(bst->num_trainers_ > 1, "num_trainers:%llu must be > 1",
                     bst->num_trainers_);
      PADDLE_ENFORCE(bst->hierarchical_allreduce_inter_nranks_ > 1,
                     "inter_nranks:%d must be > 1",
                     bst->hierarchical_allreduce_inter_nranks_);
      PADDLE_ENFORCE(
          (bst->num_trainers_ % bst->hierarchical_allreduce_inter_nranks_ == 0),
          "num_trainers:%llu mod inter_nranks:%d != 0", bst->num_trainers_,
          bst->hierarchical_allreduce_inter_nranks_);

      bst->hierarchical_allreduce_exter_nranks_ =
          bst->num_trainers_ / bst->hierarchical_allreduce_inter_nranks_;
    }

    VLOG(1) << "not find " << var_name << " in scope, so recreate it!";
    nccl_ctxs_ = scope->Var(var_name)->GetMutable<platform::NCCLCommunicator>();
    InitNCCLCtxs(scope, *bst);
  }
#endif

  inline bool IsPersistable(const std::string &name) const {
    auto iter = is_persistable_.find(name);
    return iter != is_persistable_.end() && iter->second;
  }

  BuildStrategy build_strategy_;
  std::vector<platform::Place> places_;
  std::vector<Scope *> local_scopes_;
  std::vector<Scope *> local_exec_scopes_;
  Scope *global_scope_;  // not owned
  std::unique_ptr<details::SSAGraphExecutor> executor_;

  std::unordered_map<std::string, bool> is_persistable_;

#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32)
  platform::NCCLCommunicator *nccl_ctxs_{nullptr};
#endif
  bool own_local_scope_;
  bool use_cuda_;
  bool use_all_reduce_;
  size_t nranks_;

  ir::MemOptVarInfoMapList mem_opt_var_infos_;
  ir::GarbageCollectorMap gcs_;
};

ir::Graph *ParallelExecutorPrivate::ApplyMemoryOptimizePass(ir::Graph *graph) {
  if (FLAGS_use_ngraph) {
    LOG_FIRST_N(WARNING, 1)
        << "FLAGS_use_ngraph=True, memory optimization strategy is "
           "disabled in ParallelExecutor";
    return graph;
  }

  std::vector<ir::LastLiveOpsOfVars> last_live_ops_of_vars;

  auto ref_cnt_pass = ir::PassRegistry::Instance().Get("reference_count_pass");
  ref_cnt_pass->SetNotOwned(ir::kMemOptVarInfoMapList, &mem_opt_var_infos_);
  ref_cnt_pass->SetNotOwned(ir::kLastLiveOpsOfVars, &last_live_ops_of_vars);
  graph = ref_cnt_pass->Apply(graph);
  VLOG(10) << "ReferenceCountPass Applied";

  if (build_strategy_.enable_inplace_) {
    auto inplace_pass =
        ir::PassRegistry::Instance().Get("buffer_shared_inplace_pass");
    inplace_pass->SetNotOwned(ir::kMemOptVarInfoMapList, &mem_opt_var_infos_);
    inplace_pass->SetNotOwned(ir::kLastLiveOpsOfVars, &last_live_ops_of_vars);
    inplace_pass->SetNotOwned(ir::kUseCuda, &use_cuda_);
    VLOG(10) << "Start to apply buffer_shared_inplace_pass";
    graph = inplace_pass->Apply(graph);
    VLOG(10) << "buffer_shared_inplace_pass Applied";
    LOG(INFO) << "Inplace strategy is enabled, when "
                 "build_strategy.enable_inplace = True";
  }

  /**
   * NOTE(zengjinle): If BuildStrategy.memory_optimize = None in Python,
   * set BuildStrategy.memory_optimize according to whether gc is enabled.
   * If gc is enabled, BuildStrategy.memory_optimize = False.
   * If gc is disabled, BuildStrategy.memory_optimize = True.
   * This is because gc + memory_optimize is worse than gc alone.
   *
   * As an option, users can forcibly enable BuildStrategy.memory_optimize
   * by setting it to True, or forcibly disable it by setting it to False.
   */
  bool is_gc_enabled = (GetEagerDeletionThreshold() >= 0);
  if (!build_strategy_.memory_optimize_) {
    build_strategy_.memory_optimize_ = !is_gc_enabled;
  }
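  // The resulting default (a restatement of the rule above):
  //   gc disabled (FLAGS_eager_delete_tensor_gb < 0)  -> memory_optimize = True
  //   gc enabled  (FLAGS_eager_delete_tensor_gb >= 0) -> memory_optimize = False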

  if (build_strategy_.memory_optimize_.get()) {
    auto cross_op_memory_reuse_pass = ir::PassRegistry::Instance().Get(
        "buffer_shared_cross_op_memory_reuse_pass");
    cross_op_memory_reuse_pass->SetNotOwned(ir::kMemOptVarInfoMapList,
                                            &mem_opt_var_infos_);
    cross_op_memory_reuse_pass->SetNotOwned(ir::kLastLiveOpsOfVars,
                                            &last_live_ops_of_vars);
    cross_op_memory_reuse_pass->SetNotOwned(ir::kUseCuda, &use_cuda_);
    VLOG(10) << "Start to apply buffer_shared_cross_op_memory_reuse_pass";
    graph = cross_op_memory_reuse_pass->Apply(graph);
    VLOG(10) << "buffer_shared_cross_op_memory_reuse_pass Applied";
    LOG(INFO) << "Cross op memory reuse strategy is enabled, when "
                 "build_strategy.memory_optimize = True or garbage collection "
                 "strategy is disabled, which is not recommended";
  }

  if (!is_gc_enabled) {
    return graph;
  }
  size_t max_memory_size = static_cast<size_t>(GetEagerDeletionThreshold());
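  // Create one garbage collector per place. On CUDA places the "fast" mode
  // frees memory without stream synchronization, while the stream-based
  // collector defers deletion until the device stream reaches it.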

  for (size_t i = 0; i < places_.size(); ++i) {
    auto &place = places_[i];
    if (gcs_.count(place) > 0) {
      continue;
    }
    std::unique_ptr<GarbageCollector> gc;
#ifdef PADDLE_WITH_CUDA
    if (platform::is_gpu_place(place)) {
      if (IsFastEagerDeletionModeEnabled()) {
        gc.reset(new UnsafeFastGPUGarbageCollector(
            boost::get<platform::CUDAPlace>(place), max_memory_size));
      } else {
        gc.reset(new StreamGarbageCollector(
            boost::get<platform::CUDAPlace>(place), max_memory_size));
      }
      VLOG(10) << "Created " << i << "-th GarbageCollector at " << place;
    } else {
#endif
      if (platform::is_cpu_place(place)) {
        gc.reset(new CPUGarbageCollector(boost::get<platform::CPUPlace>(place),
                                         max_memory_size));
        VLOG(10) << "Created GarbageCollector at " << place;
      } else {
        PADDLE_THROW("Unsupported place for garbage collection");
      }
#ifdef PADDLE_WITH_CUDA
    }
#endif

    gcs_.emplace(place, std::move(gc));
  }

  if (!gcs_.empty()) {
    auto eager_deletion_pass =
        ir::PassRegistry::Instance().Get("eager_deletion_pass");
    eager_deletion_pass->SetNotOwned(ir::kMemOptVarInfoMapList,
                                     &mem_opt_var_infos_);
    eager_deletion_pass->SetNotOwned(ir::kGarbageCollector, &gcs_);
    eager_deletion_pass->SetNotOwned(ir::kLastLiveOpsOfVars,
                                     &last_live_ops_of_vars);
    eager_deletion_pass->SetNotOwned(ir::kAllPlaces, &places_);
    graph = eager_deletion_pass->Apply(graph);
    VLOG(10) << "EagerDeletionPass Applied";
    LOG(INFO) << "Garbage collection strategy is enabled, when "
              << "FLAGS_eager_delete_tensor_gb = "
              << (static_cast<double>(GetEagerDeletionThreshold()) / (1 << 30));
  }
  return graph;
}

std::vector<Scope *> &ParallelExecutor::GetLocalScopes() {
  return member_->local_scopes_;
}

void ParallelExecutor::DropLocalExeScopes() {
  auto executor = dynamic_cast<details::ScopeBufferedSSAGraphExecutor *>(
      member_->executor_.get());
  if (executor) {
    executor->DropLocalExeScopes();
  }
}

bool ParallelExecutor::NeedCreateLocalExeScope() {
  auto executor = dynamic_cast<details::ScopeBufferedSSAGraphExecutor *>(
      member_->executor_.get());
  return executor && executor->NeedCreateLocalExeScope();
}

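// Illustrative construction (a minimal sketch; the variables shown are
// hypothetical and error handling is omitted):
//   std::vector<platform::Place> places = {platform::CUDAPlace(0),
//                                          platform::CUDAPlace(1)};
//   ParallelExecutor pe(places, bcast_vars, loss_var_name, &scope,
//                       /*local_scopes=*/{}, exec_strategy, build_strategy,
//                       graph);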
ParallelExecutor::ParallelExecutor(const std::vector<platform::Place> &places,
                                   const std::vector<std::string> &bcast_vars,
                                   const std::string &loss_var_name,
                                   Scope *scope,
                                   const std::vector<Scope *> &local_scopes,
                                   const ExecutionStrategy &exec_strategy,
                                   const BuildStrategy &build_strategy,
                                   ir::Graph *graph)
    : member_(new ParallelExecutorPrivate(places)) {
  member_->global_scope_ = scope;
  member_->use_cuda_ = exec_strategy.use_cuda_;
  member_->build_strategy_ = build_strategy;
  member_->use_all_reduce_ = member_->build_strategy_.reduce_ ==
                             BuildStrategy::ReduceStrategy::kAllReduce;
  member_->nranks_ = build_strategy.num_trainers_ * places.size();
  if (!member_->use_all_reduce_ && member_->nranks_ == 1) {
    LOG(INFO) << "If you set build_strategy.reduce with 'Reduce',"
                 "the number of places should be greater than 1.";
    member_->build_strategy_.reduce_ =
        BuildStrategy::ReduceStrategy::kAllReduce;
    member_->use_all_reduce_ = true;
  }
#if defined(PADDLE_WITH_CUDA) && defined(_WIN32)
  if (member_->use_cuda_) {
    PADDLE_ENFORCE(places.size() == 1, "Windows can support Single GPU only.");
  }
#endif

  LOG(INFO) << string::Sprintf(
      "The number of %s, which is used in ParallelExecutor, is %lu. And "
      "the Program will be copied %lu copies",
      (member_->use_cuda_ ? "CUDAPlace" : "CPUPlace"), places.size(),
      places.size());

  // Step 1. Broadcast the bcast_vars to all devices.
  // Create local scopes
  if (local_scopes.empty()) {
    member_->own_local_scope_ = true;
    member_->local_scopes_.emplace_back(member_->global_scope_);
    for (size_t i = 1; i < member_->places_.size(); ++i) {
      member_->local_scopes_.emplace_back(&scope->NewScope());
    }
  } else {
    member_->own_local_scope_ = false;
    PADDLE_ENFORCE_EQ(member_->places_.size(), local_scopes.size());
    for (size_t i = 0; i < member_->places_.size(); ++i) {
      member_->local_scopes_.emplace_back(&local_scopes[i]->NewScope());
    }
  }

  std::vector<ir::Graph *> graphs;
  if (member_->build_strategy_.async_mode_) {
    PADDLE_ENFORCE(!member_->use_cuda_,
                   "gpu mode does not support async_mode_ now!");
    graphs.push_back(graph);
    for (size_t i = 1; i < places.size(); ++i) {
      auto *tmp_graph = new ir::Graph(graph->OriginProgram());
      async_graphs_.emplace_back(tmp_graph);
      graphs.push_back(tmp_graph);
    }
  }

  // FIXME(Yancey1989): parallel graph mode gets better performance
  // in GPU allreduce distributed training. We need an elegant way to
  // choose the execution strategy.
  member_->build_strategy_.enable_parallel_graph_ =
      EnableParallelGraphExecution(*graph, exec_strategy,
                                   member_->build_strategy_);
  if (member_->build_strategy_.enable_parallel_graph_) {
    LOG(INFO) << "The Executor would execute the graph by ParallelGraph "
                 "Execution which can get better performance,"
              << "you can force it off by env FLAGS_enable_parallel_graph=0";
  }

  if (member_->use_cuda_ && member_->nranks_ > 1) {
#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32)
    member_->InitOrGetNCCLCommunicator(scope, &member_->build_strategy_);

    // Initialize each device context's nccl comm; it will be used by regular
    // operators like sync_batch_norm and by collective ops.
    // NOTE: if more than one ParallelExecutor runs on the same place, the
    // nccl comm will be overwritten and problems may arise.
    // NOTE: NCCL group-calls and non-group-calls cannot use the same
    // NCCL communicator, so for ParallelGraph and Multi-Process mode, re-use
    // the same communicators.
    auto *nccl_ctxs =
        member_->nccl_ctxs_->GetSyncBatchNormCtx(scope, member_->places_);
    auto &pool = platform::DeviceContextPool::Instance();
    for (size_t dev_id = 0; dev_id < member_->places_.size(); ++dev_id) {
      auto *dev_ctx = static_cast<platform::CUDADeviceContext *>(
          pool.Get(member_->places_[dev_id]));
      auto &nccl_ctx = nccl_ctxs->at(member_->places_[dev_id]);
      dev_ctx->set_nccl_comm(nccl_ctx.comm());
    }
#endif
  }
  // broadcast parameters from the 0th device to others:
  auto need_broadcast = [&]() -> bool {
    if (member_->build_strategy_.num_trainers_ > 1) {
      // 1. num_trainers would be greater than 1 for nccl distributed training.
      return true;
    } else if (member_->local_scopes_.size() != 1 && local_scopes.empty()) {
      // 2. Only one trainer process, but ParallelExecutor holds multiple
      // devices.
      return true;
    }
    return false;
  };
  // Broadcast parameters to all GPUs
  if (need_broadcast()) {
    BCastParamsToDevices(bcast_vars, member_->build_strategy_.trainer_id_);
  }

  // Startup Program has been run. All local scopes have correct parameters.

  // Step 2. Convert main_program to SSA form and dependency graph. Also, insert
  // ncclOp
  std::vector<ir::Graph *> async_graphs(places.size());
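  // In async mode, each place applies the build passes to its own copy of
  // the graph; otherwise a single multi-device graph is built across all
  // places.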
#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32)
  if (member_->build_strategy_.async_mode_) {
    VLOG(3) << "use local async mode";
    graph = member_->build_strategy_.Apply(
        graph, {member_->places_[0]}, loss_var_name,
        {member_->local_scopes_[0]}, 1, member_->use_cuda_,
        member_->nccl_ctxs_);
    for (size_t i = 1; i < member_->places_.size(); ++i) {
      graphs[i] = member_->build_strategy_.Apply(
          graphs[i], {member_->places_[i]}, loss_var_name,
          {member_->local_scopes_[i]}, 1, member_->use_cuda_,
          member_->nccl_ctxs_);
      async_graphs[i] = graphs[i];
    }
  } else {
    graph = member_->build_strategy_.Apply(
        graph, member_->places_, loss_var_name, member_->local_scopes_,
        member_->nranks_, member_->use_cuda_, member_->nccl_ctxs_);
  }
#else
  if (member_->build_strategy_.async_mode_) {
    VLOG(3) << "use local async mode";
    graph = member_->build_strategy_.Apply(
        graph, {member_->places_[0]}, loss_var_name,
        {member_->local_scopes_[0]}, 1, member_->use_cuda_);
    for (size_t i = 1; i < member_->places_.size(); ++i) {
      graphs[i] = member_->build_strategy_.Apply(
          graphs[i], {member_->places_[i]}, loss_var_name,
          {member_->local_scopes_[i]}, 1, member_->use_cuda_);
      async_graphs[i] = graphs[i];
    }
  } else {
    graph = member_->build_strategy_.Apply(
        graph, member_->places_, loss_var_name, member_->local_scopes_,
        member_->nranks_, member_->use_cuda_);
  }
#endif

  graph = member_->ApplyMemoryOptimizePass(graph);

  async_graphs[0] = graph;

  // Step 3. Create vars in each scope. Passes may also create new vars.
  //         skip control vars and empty vars
  std::vector<details::VariableInfo> var_infos;
  for (auto &node : graph->Nodes()) {
    if (node->IsVar() && !node->IsCtrlVar() && node->Var()) {
      var_infos.emplace_back();
      var_infos.back().name_ = node->Var()->Name();
      var_infos.back().type_ = node->Var()->GetType();
      var_infos.back().persistable_ = node->Var()->Persistable();

      member_->is_persistable_.emplace(node->Var()->Name(),
                                       node->Var()->Persistable());
    }
  }

  // If loss_var_name is given, the number of graphs should be exactly one.
  if (loss_var_name.size()) {
    size_t graph_num = ir::GraphNum(*graph);
    if (graph_num > 1) {
      LOG(WARNING)
          << "The number of graphs should be one, "
             "but the current graph has "
          << ir::GraphNum(*graph)
          << " sub_graphs. If you want to see the nodes of the "
             "sub_graphs, you should use 'FLAGS_print_sub_graph_dir' "
             "to specify the output dir. NOTES: if you not do training, "
             "please don't pass loss_var_name.";
    }
  }

  std::unordered_map<Scope *, Scope *> scope_map;
  for (auto *scope : member_->local_scopes_) {
    auto &local_exec_scope = scope->NewScope();
    member_->local_exec_scopes_.emplace_back(&local_exec_scope);
    scope_map.emplace(scope, &local_exec_scope);
  }

  PADDLE_ENFORCE_EQ(member_->local_scopes_.size(),
                    member_->local_exec_scopes_.size());

  std::vector<ir::Graph *> final_graphs;

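  // Pick the underlying SSA graph executor:
  //   async mode          -> AsyncSSAGraphExecutor
  //   parallel graph mode -> ParallelSSAGraphExecutor (CUDA only)
  //   otherwise           -> ThreadedSSAGraphExecutor or
  //                          FastThreadedSSAGraphExecutor, per
  //                          ExecutionStrategy::type_.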
  if (member_->build_strategy_.async_mode_) {
    VLOG(3) << "use AsyncSSAGraphExecutor";
    member_->executor_.reset(new details::AsyncSSAGraphExecutor(
        exec_strategy, member_->local_scopes_, member_->local_exec_scopes_,
        member_->places_, async_graphs));
    final_graphs = async_graphs;
  } else if (member_->build_strategy_.enable_parallel_graph_) {
    VLOG(3) << "use ParallelSSAGraphExecutor";
#ifdef PADDLE_WITH_CUDA
    // TODO(Yancey1989): Remove passing in the main_program when
    // allreduce_seq_pass doesn't need it as the attr.
    auto *pg_exe = new details::ParallelSSAGraphExecutor(
        exec_strategy, member_->local_scopes_, member_->local_exec_scopes_,
        member_->places_, graph);
    final_graphs = pg_exe->Graphs();
    member_->executor_.reset(pg_exe);
#else
    PADDLE_THROW(
        "Paddle should be compiled with CUDA for ParallelGraph Execution.");
#endif
  } else {
    if (exec_strategy.type_ == ExecutionStrategy::kDefault) {
      VLOG(3) << "use ThreadedSSAGraphExecutor";
      member_->executor_.reset(new details::ThreadedSSAGraphExecutor(
          exec_strategy, member_->local_scopes_, member_->local_exec_scopes_,
          member_->places_, graph));
    } else {
      VLOG(3) << "use FastThreadedSSAGraphExecutor";
      member_->executor_.reset(new details::FastThreadedSSAGraphExecutor(
          exec_strategy, member_->local_scopes_, member_->local_exec_scopes_,
          member_->places_, graph));
    }
    final_graphs.emplace_back(graph);
  }

  VLOG(3) << "use ScopeBufferedSSAGraphExecutor";
  if (!member_->build_strategy_.async_mode_) {
    member_->executor_.reset(new details::ScopeBufferedSSAGraphExecutor(
        exec_strategy, member_->local_scopes_, member_->local_exec_scopes_,
        std::move(var_infos), member_->places_, std::move(member_->executor_)));
  }

  for (auto *g : final_graphs) {
    auto ops = ir::FilterByNodeWrapper<details::OpHandleBase>(*g);
    for (auto *op : ops) {
      op->SetLocalExecScopes(scope_map);
    }
  }
}

void ParallelExecutor::BCastParamsToDevices(
    const std::vector<std::string> &vars, int trainer_id) const {
  VLOG(3) << "BCastParamsToDevices";
  // In the initializing bcast, all vars are broadcast from device(0).
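  // GPU tensors are broadcast with a grouped ncclBcast; CPU tensors are
  // either copied or shared with device(0), depending on the strategy.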
  for (auto &var : vars) {
    framework::Variable *main_var = member_->local_scopes_[0]->FindVar(var);
    if (main_var == nullptr || !main_var->IsType<LoDTensor>()) {
      continue;
    }

    auto &main_tensor = main_var->Get<LoDTensor>();
    if (!main_tensor.IsInitialized()) {
      VLOG(3) << "one in var not inited, return!";
      continue;
    }
    auto &dims = main_tensor.dims();
    if (paddle::platform::is_gpu_place(main_tensor.place())) {
#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32)
      std::vector<void *> buffers;
      buffers.reserve(member_->places_.size());
      size_t numel = main_tensor.numel();
      ncclDataType_t data_type = platform::ToNCCLDataType(main_tensor.type());
      for (size_t i = 0; i < member_->places_.size(); ++i) {
        auto place = member_->places_[i];
        void *buffer;

        if (i == 0 && trainer_id == 0) {
          buffer = const_cast<void *>(main_tensor.data<void>());
        } else {
          auto local_scope = member_->local_scopes_[i];
          auto *t = local_scope->Var(var)->GetMutable<LoDTensor>();
          t->Resize(dims);
          buffer = t->mutable_data(place, main_tensor.type());
        }
        buffers.push_back(buffer);
      }

      PADDLE_ENFORCE_EQ(member_->places_.size(), buffers.size(),
                        "variables' buffer size to bcast NOT equal to places");
      {
        auto *nccl_ctxs = member_->nccl_ctxs_->DefaultFlatCtx();
        platform::NCCLGroupGuard guard;
        for (size_t i = 0; i < member_->places_.size(); ++i) {
          auto &nccl_ctx = nccl_ctxs->at(member_->places_[i]);
          platform::dynload::ncclBcast(buffers[i], numel, data_type, 0,
                                       nccl_ctx.comm_, nccl_ctx.stream());
        }
        nccl_ctxs->WaitAll();
      }
#endif
    } else {
      platform::CPUPlace cpu;
      for (size_t i = 1; i < member_->places_.size(); ++i) {
        auto local_scope = member_->local_scopes_[i];
        auto *t = local_scope->Var(var)->GetMutable<LoDTensor>();

        auto copy_memory = [&] {
          t->Resize(dims);
          t->mutable_data(cpu, main_tensor.type());
          paddle::framework::TensorCopy(main_tensor, cpu, t);
        };

        auto share_memory = [&] { t->ShareDataWith(main_tensor); };

        // FIXME(zcd): LR_DECAY_COUNTER should not be shared. This is a hot fix.
        if (member_->build_strategy_.async_mode_) {
          share_memory();
        } else if (member_->use_all_reduce_ || member_->use_cuda_ ||
                   var == "@LR_DECAY_COUNTER@") {
          copy_memory();
692
        } else {
Q
can run  
Qiao Longfei 已提交
693
          share_memory();
        }
      }
    }
  }
}

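// Illustrative call (a minimal sketch; "loss" is a hypothetical fetch name):
//   FeedFetchList fetched = pe.Run({"loss"});
//   const LoDTensor &loss_tensor = fetched.at(0);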
FeedFetchList ParallelExecutor::Run(
    const std::vector<std::string> &fetch_tensors) {
  VLOG(3) << "enter ParallelExecutor Run";
#ifdef WITH_GPERFTOOLS
  if (gProfileStarted) {
    ProfilerFlush();
  }
#endif

  platform::RecordBlock b(0);

  ir::SkipMemOptVarsGuard guard(&(member_->mem_opt_var_infos_), fetch_tensors,
                                member_->HasGarbageCollectors());

  VLOG(3) << "ParallelExecutor begin to run member_->executor_->Run";
  auto fetch_data = member_->executor_->Run(fetch_tensors);
  return fetch_data;
}

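// Feeds one tensor map per device. Persistable variables are created in the
// outer local scope so they survive local-scope cleanup; non-persistable
// variables go into the per-run local exec scope.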
void ParallelExecutor::FeedTensorsIntoLocalScopes(
    const std::vector<std::unordered_map<std::string, LoDTensor>> &tensors) {
  PADDLE_ENFORCE_EQ(member_->local_scopes_.size(), tensors.size());

  for (size_t i = 0; i < tensors.size(); ++i) {
    auto &map = tensors[i];
    for (auto &pair : map) {
      bool is_persistable = member_->IsPersistable(pair.first);
      auto *feed_scope = is_persistable ? member_->local_scopes_[i]
                                        : member_->local_exec_scopes_[i];
      auto *feed_var = feed_scope->Var(pair.first);

      auto *trg = feed_var->GetMutable<LoDTensor>();
      trg->ShareDataWith(pair.second);
      trg->set_lod(pair.second.lod());
    }
  }
}

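// Splits each feed tensor along dim 0 across all places; persistable
// variables are instead replicated so that every place gets a full copy.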
void ParallelExecutor::FeedAndSplitTensorIntoLocalScopes(
    const std::unordered_map<std::string, LoDTensor> &tensors) {
  size_t num_places = member_->places_.size();
  for (auto &pair : tensors) {
    bool is_persistable = member_->IsPersistable(pair.first);
    VLOG(3) << "Split " << (is_persistable ? "persistable" : "no persistable")
            << " data (" << pair.first << "), dim:" << pair.second.dims()
            << ", place: " << pair.second.place();
    auto lod_tensors = pair.second.SplitLoDTensor(member_->places_);
    bool is_cpu_place = platform::is_cpu_place(member_->places_.front());
    if (!is_persistable && num_places != lod_tensors.size()) {
      auto error_info = string::Sprintf(
          "The number(%d) of samples[%s] of current batch is less than the "
          "count(%d) of devices(%s), currently, it is not allowed. ",
          lod_tensors.size(), pair.first, num_places,
          (is_cpu_place ? "CPU" : "GPU"));
      if (is_cpu_place) {
        error_info +=
            "You should set the environment variable CPU_NUM in the system "
            "to determine the number of devices you need.";
      }
      PADDLE_THROW(error_info);
    } else if (is_persistable) {
      if (lod_tensors.size() == 1) {
        lod_tensors.reserve(num_places);
        auto &tensor = lod_tensors.front();
        PADDLE_ENFORCE_EQ(tensor.dims(), pair.second.dims(),
                          "The dim doesn't match.");
        PADDLE_ENFORCE_EQ(tensor.place(), member_->places_.at(0),
                          "The place doesn't match.");
        for (size_t i = 1; i < num_places; ++i) {
          lod_tensors.emplace_back();
          auto &tmp = lod_tensors.back();
          framework::TensorCopy(pair.second, member_->places_.at(i), &tmp);
        }
      }
      if (lod_tensors.size() != num_places) {
        auto error_info = string::Sprintf(
            "The number(%d) of samples[%s] of the current batch does not match "
            "the count(%d) of devices(%s). Because that %s is a persistable "
            "variable, you can feed just one sample, in that case, the input "
            "sample will be copied in %d copies and be sent to different "
            "places separately. If you need that different place has different "
            "value, you should feed %d samples.",
            lod_tensors.size(), pair.first, num_places,
            (is_cpu_place ? "CPU" : "GPU"), pair.first, num_places, num_places);
        PADDLE_THROW(error_info);
      }
    }

    for (size_t j = 0; j < num_places; ++j) {
      auto *feed_scope = is_persistable ? member_->local_scopes_[j]
                                        : member_->local_exec_scopes_[j];
      auto *feed_var = feed_scope->Var(pair.first);

      auto t = feed_var->GetMutable<LoDTensor>();
      t->ShareDataWith(lod_tensors[j]);
      t->set_lod(lod_tensors[j].lod());
    }
  }
}

ParallelExecutor::~ParallelExecutor() {
  for (auto &p : member_->places_) {
    platform::DeviceContextPool::Instance().Get(p)->Wait();
  }
  delete member_;
}

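// Heuristic: ParallelGraph execution is used only when
// FLAGS_enable_parallel_graph is set and the graph contains no sparse
// (SELECTED_ROWS) updates and no send/recv ops; some strategies and
// platforms below force it off as well.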
bool ParallelExecutor::EnableParallelGraphExecution(
    const ir::Graph &graph, const ExecutionStrategy &exec_strategy,
    const BuildStrategy &build_strategy) const {
  if (!FLAGS_enable_parallel_graph) {
    return false;
  }

  bool enable_parallel_graph = true;

  for (ir::Node *node : graph.Nodes()) {
    if (node->IsVar() && node->Var()) {
      // TODO(Yancey1989): support sparse update in ParallelGraph mode.
      if (node->Var()->GetType() == proto::VarType::SELECTED_ROWS) {
        enable_parallel_graph = false;
        break;
      }
    } else if (node->IsOp() && node->Op()) {
      // TODO(Yancey1989): support pserver mode
      if (node->Op()->Type() == "send" || node->Op()->Type() == "recv") {
        enable_parallel_graph = false;
        break;
      }
    }
  }

  if (!member_->use_all_reduce_ || !member_->use_cuda_) {
    if (build_strategy.enable_sequential_execution_ ||
        exec_strategy.type_ == ExecutionStrategy::ExecutorType::kExperimental) {
      enable_parallel_graph = false;
    }
  }

#ifdef WIN32
  VLOG(1) << "Windows has no support for parallel graph execution; "
             "enable_parallel_graph is forced to false.";
  enable_parallel_graph = false;
#endif

  return enable_parallel_graph;
}

}  // namespace framework
}  // namespace paddle

USE_PASS(reference_count_pass);
USE_PASS(eager_deletion_pass);
USE_PASS(buffer_shared_inplace_pass);
USE_PASS(buffer_shared_cross_op_memory_reuse_pass);