/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/framework/parallel_executor.h"

#include <algorithm>
#include <memory>
#include <string>
#include <tuple>
#include <utility>
#include <vector>

#include "paddle/fluid/framework/details/async_ssa_graph_executor.h"
#include "paddle/fluid/framework/details/bind_threaded_ssa_graph_executor.h"
#include "paddle/fluid/framework/details/fast_threaded_ssa_graph_executor.h"
#include "paddle/fluid/framework/details/multi_devices_helper.h"
#include "paddle/fluid/framework/details/op_handle_base.h"
#include "paddle/fluid/framework/details/parallel_ssa_graph_executor.h"
#include "paddle/fluid/framework/details/scope_buffered_ssa_graph_executor.h"
#include "paddle/fluid/framework/details/threaded_ssa_graph_executor.h"
#include "paddle/fluid/framework/ir/graph.h"
#include "paddle/fluid/framework/ir/graph_helper.h"
#include "paddle/fluid/framework/ir/memory_optimize_pass/memory_optimization_var_info.h"
#include "paddle/fluid/framework/ir/memory_optimize_pass/reference_count_pass_helper.h"
#include "paddle/fluid/framework/ir/multi_devices_graph_pass/set_reader_device_info_utils.h"
#include "paddle/fluid/platform/event.h"
#include "paddle/fluid/platform/profiler.h"

#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
#include "paddle/fluid/platform/cuda_device_guard.h"
#endif

DECLARE_double(eager_delete_tensor_gb);

#ifdef WITH_GPERFTOOLS
#include "gperftools/profiler.h"
#endif
DEFINE_string(pe_profile_fname, "",
              "Profiler filename for PE, which generated by gperftools."
              "Only valid when compiled `WITH_PRIFILER=ON`. Empty if disable.");
DEFINE_bool(enable_parallel_graph, false,
            "Force disable parallel graph execution mode if set false.");

namespace paddle {
namespace framework {

static std::once_flag gProfileOnce;
#ifdef WITH_GPERFTOOLS
static bool gProfileStarted = false;
#endif

#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
std::once_flag p2p_init_flag;
#endif

class ParallelExecutorPrivate {
 public:
  ParallelExecutorPrivate(const std::vector<platform::Place> &places,
                          Scope *global_scope)
      : places_(places), global_scope_(global_scope) {
    if (!FLAGS_pe_profile_fname.empty()) {
      std::call_once(gProfileOnce, [] {
#ifdef WITH_GPERFTOOLS
        ProfilerStart(FLAGS_pe_profile_fname.c_str());
        gProfileStarted = true;
#else
        LOG(WARNING) << "Paddle is not compiled with gperftools. "
                        "FLAGS_pe_profile_fname will be ignored";
#endif
      });
    }
  }

  ~ParallelExecutorPrivate() {
    if (own_local_scope_) {
      for (size_t i = 1; i < local_scopes_.size(); ++i) {
        // Skip the first scope, since it is the global scope.
        Scope *local_scope = local_scopes_[i];
        if (global_scope_->HasKid(local_scope)) {
          global_scope_->DeleteScope(local_scope);
        }
      }
    }
  }

  bool IsUseCUDA(DeviceType use_device);

  void SetHasFeed(size_t dev_idx, bool has_feed = true);

  bool AllowPartialFeed() const;

  ir::Graph *ApplyMemoryOptimizePass(ir::Graph *graph);

  inline bool HasGarbageCollectors() const { return !gcs_.empty(); }

  /**
   * NOTE(zengjinle): the fed variables of users should not be reused,
   * because users may feed them into another network. Changing the fed
   * variables that users can visit may cause wrong calculation results,
   * which is a very subtle bug when training networks. However, these
   * variables can be garbage collected.
   *
   * ParallelExecutor provides 2 methods to feed variables:
   *
   *  - FeedTensorsIntoLocalScopes: this method would share memory of fed
   *                                variables, so we have to skip these.
   *
   *  - FeedAndSplitTensorIntoLocalScopes: this method would copy data of fed
   *                                       variables, so we do not need to skip
   *                                       them.
   */
  inline void SetSkipMemoryReuse(size_t scope_idx, const std::string &name) {
    if (mem_opt_var_infos_.size() == 0) {
      VLOG(4) << "The mem_opt_var_infos_ is empty, maybe no memory "
                 "optimization strategy is enabled";
      return;
    }
    auto iter = mem_opt_var_infos_[scope_idx].find(name);
    if (iter != mem_opt_var_infos_[scope_idx].end()) {
      iter->second->SetSkipMemoryReuse(true);
    }
  }

#if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL)
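  // Initialize NCCL contexts. Four cases are handled below: a single rank
  // (no communication needed), parallel-graph mode (a single flat NCCL id
  // shared by all per-device graphs), a single trainer with multiple places,
  // and multi-trainer training, where the NCCL unique ids are read from
  // variables in the scope. When hierarchical allreduce is enabled, inter-
  // and exter-trainer communicators are initialized as well.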
  void InitNCCLCtxs(framework::Scope *scope, const BuildStrategy &bst) {
    VLOG(1) << "nccl comm num:" << bst.nccl_comm_num_ << ", nranks:" << nranks_
            << ", num_trainers:" << bst.num_trainers_
            << ", trainer_id:" << bst.trainer_id_;

    if (bst.use_hierarchical_allreduce_) {
      VLOG(1) << ", use_hierarchical_allreduce:"
              << bst.use_hierarchical_allreduce_ << ", inter_trainers_num:"
              << bst.hierarchical_allreduce_inter_nranks_
              << ", exter_trainers_num:"
              << bst.hierarchical_allreduce_exter_nranks_;
    }

    std::vector<ncclUniqueId *> flat_nccl_ids;
    if (nranks_ == 1) {
      // FIXME(gongwb): no need to create the NCCL id when nranks == 1
      nccl_ctxs_->InitFlatCtxs(places_, flat_nccl_ids, bst.num_trainers_,
                               bst.trainer_id_);
      return;
    }

    if (bst.enable_parallel_graph_) {
      VLOG(1) << "use only one ncclid in pg model";

      ncclUniqueId *nccl_id = nullptr;

      std::string var_name = platform::GetFlatNCCLVarName(0);
      auto nccl_id_var = scope->FindVar(var_name);
      if (nccl_id_var) {
        nccl_id = nccl_id_var->GetMutable<ncclUniqueId>();
        VLOG(10) << "find nccl_id_var:" << var_name << ", nccl_id:" << nccl_id;
      } else {
        nccl_id = new ncclUniqueId();
        PADDLE_ENFORCE_EQ(
            platform::dynload::ncclGetUniqueId(nccl_id), ncclSuccess,
            platform::errors::PreconditionNotMet(
                "PaddlePaddle failed to get NCCL unique ID. It may be due to "
                "your system settings or an NCCL library error; please debug "
                "on NCCL."));
        VLOG(10) << "can't find nccl_id_var:" << var_name
                 << ", nccl_id:" << nccl_id;
      }

      flat_nccl_ids.push_back(nccl_id);

      nccl_ctxs_->InitFlatCtxs(places_, flat_nccl_ids, bst.num_trainers_,
                               bst.trainer_id_);
      VLOG(1) << "init bst nccl context complete!";
      return;
    }

    // num_trainers == 1 && places > 1
    if (bst.num_trainers_ == 1) {
      nccl_ctxs_->InitFlatCtxs(places_, flat_nccl_ids, bst.num_trainers_,
                               bst.trainer_id_);
      return;
    }

    for (int i = 0; i < static_cast<int>(bst.nccl_comm_num_); i++) {
      std::string var_name = platform::GetFlatNCCLVarName(i);
      auto nccl_id_var = scope->FindVar(var_name);
      PADDLE_ENFORCE_NOT_NULL(
          nccl_id_var,
          platform::errors::NotFound("Can't find nccl_id_var '%s'.", var_name));
      auto nccl_id = nccl_id_var->GetMutable<ncclUniqueId>();
      flat_nccl_ids.push_back(nccl_id);
    }

    nccl_ctxs_->InitFlatCtxs(places_, flat_nccl_ids, bst.num_trainers_,
                             bst.trainer_id_);

    if (bst.use_hierarchical_allreduce_) {
      std::vector<ncclUniqueId *> inter_nccl_ids;
      for (int i = 0; i < static_cast<int>(bst.nccl_comm_num_); i++) {
        std::string var_name = platform::GetHierarchicalInterNCCLVarName(i);
        auto nccl_id_var = scope->FindVar(var_name);
        PADDLE_ENFORCE_NOT_NULL(nccl_id_var,
                                platform::errors::NotFound(
                                    "Can't find nccl_id_var '%s'.", var_name));
        auto inter_nccl_id = nccl_id_var->GetMutable<ncclUniqueId>();
        inter_nccl_ids.push_back(inter_nccl_id);
      }

      std::vector<ncclUniqueId *> exter_nccl_ids;
      for (int i = 0; i < static_cast<int>(bst.nccl_comm_num_); i++) {
        std::string var_name = platform::GetHierarchicalExterNCCLVarName(i);
        auto nccl_id_var = scope->FindVar(var_name);
        PADDLE_ENFORCE_NOT_NULL(nccl_id_var,
                                platform::errors::NotFound(
                                    "Can't find nccl_id_var '%s'.", var_name));
        auto nccl_id = nccl_id_var->GetMutable<ncclUniqueId>();
        exter_nccl_ids.push_back(nccl_id);
      }

      nccl_ctxs_->InitHierarchicalCtxs(
          places_, inter_nccl_ids, exter_nccl_ids, bst.num_trainers_,
          bst.trainer_id_, bst.hierarchical_allreduce_inter_nranks_,
          bst.hierarchical_allreduce_exter_nranks_);
    }
  }

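  // Reuse the NCCLCommunicator already stored in the scope if one exists,
  // so that multiple executors built on the same scope share communicators;
  // otherwise create a new one and initialize it via InitNCCLCtxs.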
  void InitOrGetNCCLCommunicator(framework::Scope *scope, BuildStrategy *bst) {
    const std::string var_name = "NCCLCommunicator";
    auto var = scope->FindVar(var_name);
    if (var != nullptr) {
      PADDLE_ENFORCE_EQ(var->IsInitialized(), true,
                        platform::errors::PreconditionNotMet(
                            "if %s exists, it must be initialized", var_name));
      VLOG(1) << "find " << var_name
              << " in scope, so use it and does not recreate!";
      nccl_ctxs_ = var->GetMutable<platform::NCCLCommunicator>();
      return;
    }

    if (bst->use_hierarchical_allreduce_) {
      PADDLE_ENFORCE_GT(
          bst->num_trainers_, 1,
          platform::errors::PreconditionNotMet(
              "The num_trainers should be greater than 1, but received %llu.",
              bst->num_trainers_));
      PADDLE_ENFORCE_GT(
          bst->hierarchical_allreduce_inter_nranks_, 1,
          platform::errors::PreconditionNotMet(
              "The inter_nranks should be greater than 1, but received %d.",
              bst->hierarchical_allreduce_inter_nranks_));
      PADDLE_ENFORCE_EQ(
          bst->num_trainers_ % bst->hierarchical_allreduce_inter_nranks_, 0,
          platform::errors::PreconditionNotMet(
              "num_trainers:%llu mod inter_nranks:%d != 0", bst->num_trainers_,
              bst->hierarchical_allreduce_inter_nranks_));

      bst->hierarchical_allreduce_exter_nranks_ =
          bst->num_trainers_ / bst->hierarchical_allreduce_inter_nranks_;
    }

    VLOG(1) << "not find " << var_name << " in scope, so recreate it!";
    nccl_ctxs_ = scope->Var(var_name)->GetMutable<platform::NCCLCommunicator>();
    InitNCCLCtxs(scope, *bst);
  }
#endif

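  // The BKCL (XPU) path below mirrors the NCCL initialization above, except
  // that hierarchical allreduce is not supported on XPU.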
#if defined(PADDLE_WITH_XPU_BKCL)
  void InitBKCLCtxs(framework::Scope *scope, const BuildStrategy &bst) {
    VLOG(1) << "bkcl comm num:" << bst.bkcl_comm_num_ << ", nranks:" << nranks_
            << ", num_trainers:" << bst.num_trainers_
            << ", trainer_id:" << bst.trainer_id_;

    PADDLE_ENFORCE_EQ(bst.use_hierarchical_allreduce_, false,
                      platform::errors::Unimplemented(
                          "xpu doesn't support use_hierarchical_allreduce"));

    std::vector<BKCLUniqueId *> flat_bkcl_ids;
    if (nranks_ == 1) {
      // FIXME(gongwb): no need to create the BKCL id when nranks == 1
      bkcl_ctxs_->InitFlatCtxs(places_, flat_bkcl_ids, bst.num_trainers_,
                               bst.trainer_id_);
      return;
    }

    if (bst.enable_parallel_graph_) {
      VLOG(1) << "use only one bkclid in pg model";

      BKCLUniqueId *bkcl_id = nullptr;

      std::string var_name = platform::GetFlatBKCLVarName(0);
      auto bkcl_id_var = scope->FindVar(var_name);
      std::unique_ptr<BKCLUniqueId> id(new BKCLUniqueId());
      if (bkcl_id_var) {
        bkcl_id = bkcl_id_var->GetMutable<BKCLUniqueId>();
      } else {
        PADDLE_ENFORCE_EQ(
            bkcl_get_unique_id(id.get()), BKCL_SUCCESS,
            platform::errors::Unavailable("bkcl get unique id failed"));
        bkcl_id = id.get();
      }

      flat_bkcl_ids.push_back(bkcl_id);

      bkcl_ctxs_->InitFlatCtxs(places_, flat_bkcl_ids, bst.num_trainers_,
                               bst.trainer_id_);
      VLOG(1) << "init bst bkcl context complete!";
      return;
    }

    // num_trainers == 1 && places > 1
    if (bst.num_trainers_ == 1) {
      bkcl_ctxs_->InitFlatCtxs(places_, flat_bkcl_ids, bst.num_trainers_,
                               bst.trainer_id_);
      return;
    }

    for (int i = 0; i < static_cast<int>(bst.bkcl_comm_num_); i++) {
      std::string var_name = platform::GetFlatBKCLVarName(i);
      auto bkcl_id_var = scope->FindVar(var_name);
      PADDLE_ENFORCE_NOT_NULL(
          bkcl_id_var,
          platform::errors::NotFound("Can't find bkcl_id_var '%s'.", var_name));
      auto bkcl_id = bkcl_id_var->GetMutable<BKCLUniqueId>();
      flat_bkcl_ids.push_back(bkcl_id);
    }

    bkcl_ctxs_->InitFlatCtxs(places_, flat_bkcl_ids, bst.num_trainers_,
                             bst.trainer_id_);
  }

  void InitOrGetBKCLCommunicator(framework::Scope *scope,
                                 const BuildStrategy &bst) {
    const std::string var_name = "BKCLCommunicator";
    auto var = scope->FindVar(var_name);
    if (var != nullptr) {
      PADDLE_ENFORCE_EQ(var->IsInitialized(), true,
                        platform::errors::PreconditionNotMet(
                            "if %s exists, it must be initialized", var_name));
      VLOG(1) << "find " << var_name
              << " in scope, so use it and does not recreate!";
      bkcl_ctxs_ = var->GetMutable<platform::BKCLCommunicator>();
      return;
    }

    VLOG(1) << "not find " << var_name << " in scope, so recreate it!";
    bkcl_ctxs_ = scope->Var(var_name)->GetMutable<platform::BKCLCommunicator>();
    InitBKCLCtxs(scope, bst);
  }
#endif

  inline bool IsPersistable(const std::string &name) const {
    auto iter = is_persistable_.find(name);
    return iter != is_persistable_.end() && iter->second;
  }

  BuildStrategy build_strategy_;
  std::vector<platform::Place> places_;
  std::vector<Scope *> local_scopes_;
  std::vector<Scope *> local_exec_scopes_;
  Scope *global_scope_;  // not owned
  std::unique_ptr<details::SSAGraphExecutor> executor_;

  std::unordered_map<std::string, bool> is_persistable_;

#if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL)
  platform::NCCLCommunicator *nccl_ctxs_{nullptr};
#elif defined(PADDLE_WITH_XPU_BKCL)
  platform::BKCLCommunicator *bkcl_ctxs_{nullptr};
#endif
  bool own_local_scope_;
  DeviceType use_device_;
  bool use_all_reduce_;
  size_t nranks_;

  ir::MemOptVarInfoMapList mem_opt_var_infos_;
  ir::GarbageCollectorMap gcs_;

  details::ParallelSSAGraphExecutor *inference_executor_{nullptr};
};

bool ParallelExecutorPrivate::IsUseCUDA(DeviceType use_device) {
  return use_device == p::kCUDA;
}

void ParallelExecutorPrivate::SetHasFeed(size_t dev_idx, bool has_feed) {
  if (inference_executor_) {
    inference_executor_->SetHasFeed(dev_idx, has_feed);
  }
}

bool ParallelExecutorPrivate::AllowPartialFeed() const {
  return inference_executor_ && inference_executor_->SupportPartialFeed();
}

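// Apply the memory-optimization pass pipeline to the graph: a reference
// counting pass first, then (depending on the build strategy)
// inplace_addto_op_pass, buffer_shared_inplace_pass and
// buffer_shared_cross_op_memory_reuse_pass, and finally eager_deletion_pass
// when garbage collection is enabled.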
ir::Graph *ParallelExecutorPrivate::ApplyMemoryOptimizePass(ir::Graph *graph) {
  /**
   * NOTE(zengjinle): If BuildStrategy.memory_optimize = None in Python,
   * set BuildStrategy.memory_optimize according to whether gc is enabled.
   * If gc is enabled, BuildStrategy.memory_optimize = False.
   * If gc is disabled, BuildStrategy.memory_optimize = True.
   * This is because gc+memory_optimize is worse than gc only.
   *
   * As an option, users can forcibly enable BuildStrategy.memory_optimize
   * by setting it to True, and forcibly disable it by setting it to False.
   */
  bool is_gc_enabled = (GetEagerDeletionThreshold() >= 0);
  if (!build_strategy_.memory_optimize_) {
    build_strategy_.memory_optimize_ = !is_gc_enabled;
  }

  bool need_mem_opt = build_strategy_.enable_inplace_ ||
                      build_strategy_.enable_addto_ ||
                      build_strategy_.memory_optimize_.get() || is_gc_enabled;

  if (!need_mem_opt) return graph;

  std::vector<ir::LastLiveOpsOfVars> last_live_ops_of_vars;

  auto ref_cnt_pass = ir::PassRegistry::Instance().Get("reference_count_pass");
  ref_cnt_pass->SetNotOwned(ir::kMemOptVarInfoMapList, &mem_opt_var_infos_);
  ref_cnt_pass->SetNotOwned(ir::kLastLiveOpsOfVars, &last_live_ops_of_vars);
  graph = ref_cnt_pass->Apply(graph);
  VLOG(10) << "ReferenceCountPass Applied";

  if (build_strategy_.enable_addto_) {
    auto addto_pass = ir::PassRegistry::Instance().Get("inplace_addto_op_pass");
    addto_pass->SetNotOwned(ir::kMemOptVarInfoMapList, &mem_opt_var_infos_);
    addto_pass->SetNotOwned(ir::kLastLiveOpsOfVars, &last_live_ops_of_vars);
    addto_pass->Set(ir::kUseCuda, new bool(use_device_ == p::kCUDA));
    VLOG(10) << "Start to apply inplace_addto_op_pass";
    graph = addto_pass->Apply(graph);
    VLOG(10) << "inplace_addto_op_pass Applied";
  }

  if (build_strategy_.enable_inplace_) {
    auto inplace_pass =
        ir::PassRegistry::Instance().Get("buffer_shared_inplace_pass");
    inplace_pass->SetNotOwned(ir::kMemOptVarInfoMapList, &mem_opt_var_infos_);
    inplace_pass->SetNotOwned(ir::kLastLiveOpsOfVars, &last_live_ops_of_vars);
    inplace_pass->Set(ir::kUseCuda, new bool(use_device_ == p::kCUDA));
    VLOG(10) << "Start to apply buffer_shared_inplace_pass";
    graph = inplace_pass->Apply(graph);
    VLOG(10) << "buffer_shared_inplace_pass Applied";
    VLOG(1) << "Inplace strategy is enabled, when "
               "build_strategy.enable_inplace = True";
  }

  if (build_strategy_.memory_optimize_.get()) {
    auto cross_op_memory_reuse_pass = ir::PassRegistry::Instance().Get(
        "buffer_shared_cross_op_memory_reuse_pass");
    cross_op_memory_reuse_pass->SetNotOwned(ir::kMemOptVarInfoMapList,
                                            &mem_opt_var_infos_);
    cross_op_memory_reuse_pass->SetNotOwned(ir::kLastLiveOpsOfVars,
                                            &last_live_ops_of_vars);
    cross_op_memory_reuse_pass->Set(ir::kUseCuda,
                                    new bool(use_device_ == p::kCUDA));
    VLOG(10) << "Start to apply buffer_shared_cross_op_memory_reuse_pass";
    graph = cross_op_memory_reuse_pass->Apply(graph);
    VLOG(10) << "buffer_shared_cross_op_memory_reuse_pass Applied";
    LOG(INFO) << "Cross op memory reuse strategy is enabled, when "
                 "build_strategy.memory_optimize = True or garbage collection "
                 "strategy is disabled, which is not recommended";
  }

  if (!is_gc_enabled) {
    return graph;
  }
  size_t max_memory_size = static_cast<size_t>(GetEagerDeletionThreshold());

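  // Create one garbage collector per place: GPU places use either the unsafe
  // fast collector or a stream-based one, depending on whether fast eager
  // deletion mode is enabled, while XPU and CPU places get their own
  // collector types.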
  for (size_t i = 0; i < places_.size(); ++i) {
    auto &place = places_[i];
    if (gcs_.count(place) > 0) {
      continue;
    }
    std::unique_ptr<GarbageCollector> gc;
    if (platform::is_gpu_place(place)) {
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
      if (IsFastEagerDeletionModeEnabled()) {
        gc.reset(new UnsafeFastGPUGarbageCollector(
            BOOST_GET_CONST(platform::CUDAPlace, place), max_memory_size));
      } else {
        gc.reset(new StreamGarbageCollector(
            BOOST_GET_CONST(platform::CUDAPlace, place), max_memory_size));
      }
      VLOG(10) << "Created " << i << "-th GarbageCollector at " << place;
#else
      PADDLE_THROW(platform::errors::PermissionDenied(
          "Paddle can't use CUDA device since it's not compiled with CUDA, "
          "please recompile or reinstall Paddle with GPU support."));
#endif
    } else if (platform::is_xpu_place(place)) {
#if defined(PADDLE_WITH_XPU)
      gc.reset(new XPUGarbageCollector(
          BOOST_GET_CONST(platform::XPUPlace, place), max_memory_size));
      VLOG(10) << "Created " << i << "-th GarbageCollector at " << place;
#else
      PADDLE_THROW(platform::errors::PermissionDenied(
          "Paddle can't use XPU device since it's not compiled with XPU, "
          "please recompile or reinstall Paddle with XPU support."));
#endif
    } else if (platform::is_cpu_place(place)) {
      gc.reset(new CPUGarbageCollector(
          BOOST_GET_CONST(platform::CPUPlace, place), max_memory_size));
      VLOG(10) << "Created GarbageCollector at " << place;
    } else {
      PADDLE_THROW(platform::errors::PreconditionNotMet(
          "Unsupported place for garbage collection"));
    }
    gcs_.emplace(place, std::move(gc));
  }

  if (!gcs_.empty()) {
    auto eager_deletion_pass =
        ir::PassRegistry::Instance().Get("eager_deletion_pass");
    eager_deletion_pass->SetNotOwned(ir::kMemOptVarInfoMapList,
                                     &mem_opt_var_infos_);
    eager_deletion_pass->SetNotOwned(ir::kGarbageCollector, &gcs_);
    eager_deletion_pass->SetNotOwned(ir::kLastLiveOpsOfVars,
                                     &last_live_ops_of_vars);
    eager_deletion_pass->SetNotOwned(ir::kAllPlaces, &places_);
    graph = eager_deletion_pass->Apply(graph);
    VLOG(10) << "EagerDeletionPass Applied";
    VLOG(1) << "Garbage collection strategy is enabled, when "
            << "FLAGS_eager_delete_tensor_gb = "
            << FLAGS_eager_delete_tensor_gb;
  }
  return graph;
}

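// RAII guard that clears the per-device "has feed" flags of the inference
// executor when a run finishes, so that a partial feed in one iteration does
// not leak into the next one.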
class ResetHasFeedGuard {
 public:
  explicit ResetHasFeedGuard(ParallelExecutorPrivate *pe_member)
      : pe_member_(pe_member) {}

  ~ResetHasFeedGuard() {
    for (size_t i = 0; i < pe_member_->places_.size(); ++i) {
      pe_member_->SetHasFeed(i, false);
    }
  }

 private:
  ParallelExecutorPrivate *pe_member_;
};

size_t ParallelExecutor::DeviceCount() const { return member_->places_.size(); }

std::vector<Scope *> &ParallelExecutor::GetLocalScopes() {
  return member_->local_scopes_;
}

void ParallelExecutor::DropLocalExeScopes() {
  auto executor = dynamic_cast<details::ScopeBufferedSSAGraphExecutor *>(
      member_->executor_.get());
  if (executor) {
    executor->DropLocalExeScopes();
  }
}

bool ParallelExecutor::NeedCreateLocalExeScope() {
  auto executor = dynamic_cast<details::ScopeBufferedSSAGraphExecutor *>(
      member_->executor_.get());
  return executor && executor->NeedCreateLocalExeScope();
}

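// Enable CUDA/HIP peer-to-peer access between every pair of GPU places, once
// per process, so that direct device-to-device memory access can be used
// between those devices.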
void InitP2P(const std::vector<platform::Place> &places) {
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
  std::call_once(p2p_init_flag, [&]() {
    int count = places.size();
    if (count <= 1) return;

    std::vector<int> devices;
    for (int i = 0; i < count; i++) {
      if (!is_gpu_place(places[i])) return;

      platform::CUDAPlace device =
          BOOST_GET_CONST(platform::CUDAPlace, places[i]);
      devices.push_back(device.GetDeviceId());
    }

    for (int i = 0; i < count; ++i) {
      for (int j = 0; j < count; ++j) {
        if (devices[i] == devices[j]) continue;
        int can_access = -1;
#ifdef PADDLE_WITH_HIP
        hipError_t ret =
            hipDeviceCanAccessPeer(&can_access, devices[i], devices[j]);
        if (ret != hipSuccess || can_access != 1) {
#else
        cudaError_t ret =
            cudaDeviceCanAccessPeer(&can_access, devices[i], devices[j]);
        if (ret != cudaSuccess || can_access != 1) {
#endif
          LOG(WARNING) << "Cannot enable P2P access from " << devices[i]
                       << " to " << devices[j];
        } else {
          platform::CUDADeviceGuard guard(devices[i]);
#ifdef PADDLE_WITH_HIP
          hipDeviceEnablePeerAccess(devices[j], 0);
#else
          cudaDeviceEnablePeerAccess(devices[j], 0);
#endif
        }
      }
    }
    VLOG(1) << "init p2p";
  });
#endif
}

ParallelExecutor::ParallelExecutor(const std::vector<platform::Place> &places,
                                   const std::vector<std::string> &bcast_vars,
                                   const std::string &loss_var_name,
                                   Scope *scope,
                                   const std::vector<Scope *> &local_scopes,
                                   const ExecutionStrategy &exec_strategy,
                                   const BuildStrategy &build_strategy,
                                   ir::Graph *graph)
    : member_(new ParallelExecutorPrivate(places, scope)) {
  PADDLE_ENFORCE(places.size() > 0 && !is_npu_place(places[0]),
                 platform::errors::Unavailable(
                     "NPU is not supported in ParallelExecutor"));
  InitP2P(places);
  ir::InitReaderQueueDeviceCount(graph, *(member_->global_scope_),
                                 member_->places_.size());
  member_->use_device_ = exec_strategy.use_device_;
  member_->build_strategy_ = build_strategy;
  member_->use_all_reduce_ = member_->build_strategy_.reduce_ ==
                             BuildStrategy::ReduceStrategy::kAllReduce;
  member_->nranks_ = build_strategy.num_trainers_ * places.size();
  if (!member_->use_all_reduce_ && member_->nranks_ == 1) {
    LOG(INFO) << "If you set build_strategy.reduce with 'Reduce',"
                 "the number of places should be greater than 1.";
    member_->build_strategy_.reduce_ =
        BuildStrategy::ReduceStrategy::kAllReduce;
    member_->use_all_reduce_ = true;
  }
#if (defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)) && defined(_WIN32)
  if (member_->IsUseCUDA(member_->use_device_)) {
    PADDLE_ENFORCE_EQ(
        places.size(), 1,
        platform::errors::Unavailable("Windows can support Single GPU only."));
  }
#endif

#if (defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)) && \
    (!defined(PADDLE_WITH_NCCL) && !defined(PADDLE_WITH_RCCL))
  if (member_->IsUseCUDA(member_->use_device_)) {
    PADDLE_ENFORCE_EQ(
        places.size(), 1,
        platform::errors::PermissionDenied(
            "Your machine has multiple cards, "
            "but the WITH_NCCL option is not turned on during compilation, "
            "and you cannot use multi-card training or prediction. "
            "Please recompile and turn on the WITH_NCCL option."));
  }
#endif

  std::string device_name;
  if (member_->use_device_ == p::kCPU) {
    device_name = "CPU";
  } else if (member_->use_device_ == p::kCUDA) {
    device_name = "CUDA";
  } else {
    device_name = "XPU";
  }

  VLOG(1) << string::Sprintf(
      "The Program will be executed on %s using ParallelExecutor, %lu "
      "cards are used, so %lu programs are executed in parallel.",
      device_name, places.size(), places.size());

  // Step 1. Bcast the bcast_vars to devs.
  // Create local scopes
  if (local_scopes.empty()) {
    member_->own_local_scope_ = true;
    member_->local_scopes_.emplace_back(member_->global_scope_);
    for (size_t i = 1; i < member_->places_.size(); ++i) {
      member_->local_scopes_.emplace_back(&scope->NewScope());
    }
  } else {
    member_->own_local_scope_ = false;
    PADDLE_ENFORCE_EQ(member_->places_.size(), local_scopes.size(),
                      platform::errors::PreconditionNotMet(
                          "member_->places_.size() = %d is not equal to "
                          "local_scopes.size() = %d",
                          member_->places_.size(), local_scopes.size()));
    for (size_t i = 0; i < member_->places_.size(); ++i) {
      member_->local_scopes_.emplace_back(&local_scopes[i]->NewScope());
    }
  }

  std::vector<ir::Graph *> graphs;
  if (member_->build_strategy_.async_mode_) {
    PADDLE_ENFORCE_EQ(member_->IsUseCUDA(member_->use_device_), false,
                      platform::errors::Unavailable(
                          "gpu mode does not support async_mode_ now!"));
    graphs.push_back(graph);
    for (size_t i = 1; i < places.size(); ++i) {
      auto *tmp_graph = new ir::Graph(graph->OriginProgram());
      async_graphs_.emplace_back(tmp_graph);
      graphs.push_back(tmp_graph);
    }
  }

  // FIXME(Yancey1989): parallel graph mode gets better performance
  // in GPU allreduce distributed training. Need an elegant way to
  // choose the execution strategy.
  member_->build_strategy_.enable_parallel_graph_ =
      EnableParallelGraphExecution(*graph, exec_strategy,
                                   member_->build_strategy_);
  if (member_->build_strategy_.enable_parallel_graph_) {
    LOG(INFO) << "The Executor would execute the graph by ParallelGraph "
                 "Execution which can get better performance,"
              << "you can force it off by env FLAGS_enable_parallel_graph=0";
  }

  if (member_->IsUseCUDA(member_->use_device_) && member_->nranks_ > 1) {
#if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL)
    member_->InitOrGetNCCLCommunicator(scope, &member_->build_strategy_);

    // Initialize each device context's nccl comm; it will be used by normal
    // operators like sync_batch_norm and by collective ops.
    // NOTE: if more than one ParallelExecutor is created with the same place,
    // the nccl comm will be rewritten and there will be some problem.
    // NOTE: NCCL group-calls and non-group-calls can not use the same
    // NCCL communicator, so for ParallelGraph and Multi-Process mode, re-use
    // same communicators.
    auto *nccl_ctxs =
        member_->nccl_ctxs_->GetSyncBatchNormCtx(scope, member_->places_);
    auto &pool = platform::DeviceContextPool::Instance();
    for (size_t dev_id = 0; dev_id < member_->places_.size(); ++dev_id) {
      auto *dev_ctx = static_cast<platform::CUDADeviceContext *>(
          pool.Get(member_->places_[dev_id]));
      auto &nccl_ctx = nccl_ctxs->at(member_->places_[dev_id]);
      dev_ctx->set_nccl_comm(nccl_ctx.comm());
    }
#else
    PADDLE_THROW(
        platform::errors::PreconditionNotMet("Not compiled with CUDA."));
#endif
  }
  if (member_->use_device_ == p::kXPU && member_->nranks_ > 1) {
#if defined(PADDLE_WITH_XPU_BKCL)
    member_->InitOrGetBKCLCommunicator(scope, member_->build_strategy_);

    auto *bkcl_ctxs =
        member_->bkcl_ctxs_->GetSyncBatchNormCtx(scope, member_->places_);
    auto &pool = platform::DeviceContextPool::Instance();
    for (size_t dev_id = 0; dev_id < member_->places_.size(); ++dev_id) {
      auto *dev_ctx = static_cast<platform::XPUDeviceContext *>(
          pool.Get(member_->places_[dev_id]));
      auto &bkcl_ctx = bkcl_ctxs->at(member_->places_[dev_id]);
      dev_ctx->set_bkcl_context(bkcl_ctx.comm());
    }
#else
    PADDLE_THROW(
        platform::errors::PreconditionNotMet("Not compiled with XPU."));
#endif
  }
  // broadcast parameters from the 0th device to others:
  auto need_broadcast = [&]() -> bool {
    if (member_->build_strategy_.num_trainers_ > 1) {
      // 1. num_trainers would be greater than 1 for nccl distributed training.
      return true;
    } else if (member_->local_scopes_.size() != 1 && local_scopes.empty()) {
      // 2. Only one trainer process, but ParallelExecutor hold multiple
      // devices.
      return true;
    }
    return false;
  };
  // Bcast Parameters to all GPUs
  if (need_broadcast()) {
    BCastParamsToDevices(bcast_vars, member_->build_strategy_.trainer_id_);
  }

  // Startup Program has been run. All local scopes have correct parameters.

  // Step 2. Convert main_program to SSA form and dependency graph. Also, insert
  // ncclOp
  std::vector<ir::Graph *> async_graphs(places.size());
#if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL)
  if (member_->build_strategy_.async_mode_) {
    VLOG(3) << "use local async mode";
    graph = member_->build_strategy_.Apply(
        graph, {member_->places_[0]}, loss_var_name,
        {member_->local_scopes_[0]}, 1, member_->use_device_,
        member_->nccl_ctxs_);
    for (size_t i = 1; i < member_->places_.size(); ++i) {
      graphs[i] = member_->build_strategy_.Apply(
          graphs[i], {member_->places_[i]}, loss_var_name,
          {member_->local_scopes_[i]}, 1, member_->use_device_,
          member_->nccl_ctxs_);
      async_graphs[i] = graphs[i];
    }
  } else {
    graph = member_->build_strategy_.Apply(
        graph, member_->places_, loss_var_name, member_->local_scopes_,
        member_->nranks_, member_->use_device_, member_->nccl_ctxs_);
  }
#elif defined(PADDLE_WITH_XPU_BKCL)
  if (member_->build_strategy_.async_mode_) {
    VLOG(3) << "use local async mode";
    graph = member_->build_strategy_.Apply(
        graph, {member_->places_[0]}, loss_var_name,
        {member_->local_scopes_[0]}, 1, member_->use_device_,
        member_->bkcl_ctxs_);
    for (size_t i = 1; i < member_->places_.size(); ++i) {
      graphs[i] = member_->build_strategy_.Apply(
          graphs[i], {member_->places_[i]}, loss_var_name,
          {member_->local_scopes_[i]}, 1, member_->use_device_,
          member_->bkcl_ctxs_);
      async_graphs[i] = graphs[i];
    }
  } else {
    graph = member_->build_strategy_.Apply(
        graph, member_->places_, loss_var_name, member_->local_scopes_,
        member_->nranks_, member_->use_device_, member_->bkcl_ctxs_);
  }
#else
  if (member_->build_strategy_.async_mode_) {
    VLOG(3) << "use local async mode";
    graph = member_->build_strategy_.Apply(
        graph, {member_->places_[0]}, loss_var_name,
        {member_->local_scopes_[0]}, 1, member_->use_device_);
    for (size_t i = 1; i < member_->places_.size(); ++i) {
      graphs[i] = member_->build_strategy_.Apply(
          graphs[i], {member_->places_[i]}, loss_var_name,
          {member_->local_scopes_[i]}, 1, member_->use_device_);
      async_graphs[i] = graphs[i];
    }
  } else {
    graph = member_->build_strategy_.Apply(
        graph, member_->places_, loss_var_name, member_->local_scopes_,
        member_->nranks_, member_->use_device_);
  }
#endif

  graph = member_->ApplyMemoryOptimizePass(graph);

  async_graphs[0] = graph;

  // Step 3. Create vars in each scope. Passes may also create new vars.
  //         skip control vars and empty vars
  std::vector<details::VariableInfo> var_infos;
  for (auto &node : graph->Nodes()) {
    if (node->IsVar() && !node->IsCtrlVar() && node->Var()) {
      var_infos.emplace_back();
      var_infos.back().name_ = node->Var()->Name();
      var_infos.back().type_ = node->Var()->GetType();
      var_infos.back().persistable_ = node->Var()->Persistable();

      member_->is_persistable_.emplace(node->Var()->Name(),
                                       node->Var()->Persistable());
    }
  }

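  // Fused variables (e.g. those created by parameter/gradient fusion passes)
  // are recorded as a graph attribute rather than as graph nodes, so collect
  // their VariableInfo separately.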
  if (graph->Has(details::kFusedVars)) {
    auto &fused_vars = graph->Get<details::FusedVars>(details::kFusedVars);
    for (auto &fused_var : fused_vars) {
      var_infos.emplace_back();
      var_infos.back() = fused_var.second;

      member_->is_persistable_.emplace(fused_var.first,
                                       fused_var.second.persistable_);
    }
  }

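  // Each local scope gets a child "execution scope". Intermediate variables
  // live in the execution scope, so they can be dropped between iterations
  // while parameters remain in the outer local scope.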
  std::unordered_map<Scope *, Scope *> scope_map;
  for (auto *scope : member_->local_scopes_) {
    auto &local_exec_scope = scope->NewScope();
    member_->local_exec_scopes_.emplace_back(&local_exec_scope);
    scope_map.emplace(scope, &local_exec_scope);
  }

  PADDLE_ENFORCE_EQ(
      member_->local_scopes_.size(), member_->local_exec_scopes_.size(),
      platform::errors::PreconditionNotMet(
          "member_->local_scopes_.size() = %d is not equal to "
          "member_->local_exec_scopes_.size() = %d",
          member_->local_scopes_.size(), member_->local_exec_scopes_.size()));

  std::vector<ir::Graph *> final_graphs;

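  // Choose the SSA graph executor: AsyncSSAGraphExecutor for async mode,
  // ParallelSSAGraphExecutor for parallel-graph mode and data-parallel
  // inference, and a threaded executor otherwise. The chosen executor is
  // finally wrapped in a ScopeBufferedSSAGraphExecutor that manages the
  // local execution scopes.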
  if (member_->build_strategy_.async_mode_) {
    VLOG(3) << "use AsyncSSAGraphExecutor";
    member_->executor_.reset(new details::AsyncSSAGraphExecutor(
        exec_strategy, member_->local_scopes_, member_->local_exec_scopes_,
        member_->places_, async_graphs));
    final_graphs = async_graphs;
  } else if (member_->build_strategy_.enable_parallel_graph_) {
    VLOG(3) << "use ParallelSSAGraphExecutor";
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
    // TODO(Yancey1989): Remove passing in the main_program when
    // allreduce_seq_pass doesn't need it as the attr.
    bool is_inference = details::IsDataParallelInferenceGraph(*graph);
    bool has_drop_last_read_op = details::HasDropLastReadOp(*graph);

    auto *pg_exe = new details::ParallelSSAGraphExecutor(
        exec_strategy, member_->local_scopes_, member_->local_exec_scopes_,
        member_->places_, graph);
    final_graphs = pg_exe->Graphs();
    member_->executor_.reset(pg_exe);

    if (is_inference && member_->places_.size() > 1) {
      member_->inference_executor_ = pg_exe;
      if (!has_drop_last_read_op) {
        VLOG(5) << "Enable partial feed support in inference phase";
        pg_exe->EnablePartialFeedSupport();
      }
    }
#else
    PADDLE_THROW(platform::errors::PreconditionNotMet(
        "Paddle should be compiled with CUDA for ParallelGraph Execution."));
#endif
  } else {
    bool has_drop_last_read_op = details::HasDropLastReadOp(*graph);
    auto possible_inference_graphs =
        details::TrySeparateToMultipleSingleDeviceGraphs(graph);
    if (!possible_inference_graphs.empty()) {
      VLOG(5) << "Use ParallelSSAGraphExecutor in inference phase";
      auto *pg_exe = new details::ParallelSSAGraphExecutor(
          exec_strategy, member_->local_scopes_, member_->local_exec_scopes_,
          member_->places_, std::move(possible_inference_graphs));
      if (!has_drop_last_read_op) {
        VLOG(5) << "Enable partial feed support in inference phase";
        pg_exe->EnablePartialFeedSupport();
      }
      final_graphs = pg_exe->Graphs();
      member_->executor_.reset(pg_exe);
      member_->inference_executor_ = pg_exe;
    } else {
      LOG_IF(WARNING, details::HasKeepLastReadOp(*graph))
          << "drop_last=False for DataLoader is not supported in training "
             "network. It is automatically turned to drop_last=True.";
      if (exec_strategy.type_ == ExecutionStrategy::kDefault) {
        VLOG(3) << "use ThreadedSSAGraphExecutor";
        member_->executor_.reset(new details::ThreadedSSAGraphExecutor(
            exec_strategy, member_->local_scopes_, member_->local_exec_scopes_,
            member_->places_, graph));
      } else {
        if (member_->use_device_ == p::kXPU) {
#if defined(PADDLE_WITH_XPU)
          VLOG(3) << "use BindThreadedSSAGraphExecutor";
          member_->executor_.reset(new details::BindThreadedSSAGraphExecutor(
              exec_strategy, member_->local_scopes_,
              member_->local_exec_scopes_, member_->places_, graph));
#else
          PADDLE_THROW(platform::errors::PermissionDenied(
              "Paddle can't use XPU device since it's not compiled with XPU,"
              "Please recompile or reinstall Paddle with XPU support."));
#endif
        } else {
          VLOG(3) << "use FastThreadedSSAGraphExecutor";
          member_->executor_.reset(new details::FastThreadedSSAGraphExecutor(
              exec_strategy, member_->local_scopes_,
              member_->local_exec_scopes_, member_->places_, graph));
        }
      }
      final_graphs.emplace_back(graph);
    }
  }

  VLOG(3) << "use ScopeBufferedSSAGraphExecutor";
  if (!member_->build_strategy_.async_mode_) {
    member_->executor_.reset(new details::ScopeBufferedSSAGraphExecutor(
        exec_strategy, member_->local_scopes_, member_->local_exec_scopes_,
        std::move(var_infos), member_->places_, std::move(member_->executor_)));
  }

  for (auto *g : final_graphs) {
    auto ops = ir::FilterByNodeWrapper<details::OpHandleBase>(*g);
    for (auto *op : ops) {
      op->SetLocalExecScopes(scope_map);
    }
  }

  if (final_graphs.size() == 1) {
    ir::SetReaderOpDeviceInfo(final_graphs[0], member_->places_.size());
  } else {
    for (size_t i = 0; i < final_graphs.size(); ++i) {
      ir::SetReaderOpDeviceInfo(final_graphs[i], member_->places_.size(), i);
    }
  }
}

void ParallelExecutor::BCastParamsToDevices(
    const std::vector<std::string> &vars, int trainer_id) const {
  VLOG(3) << "BCastParamsToDevices";
  // In the initializing bcast, all vars are bcast from device(0).
  for (auto &var : vars) {
    framework::Variable *main_var = member_->local_scopes_[0]->FindVar(var);
    if (main_var == nullptr || !main_var->IsType<LoDTensor>()) {
      continue;
    }

    auto &main_tensor = main_var->Get<LoDTensor>();
    if (!main_tensor.IsInitialized()) {
      VLOG(3) << "one in var not inited, return!";
      continue;
    }
    auto &dims = main_tensor.dims();
    if (paddle::platform::is_gpu_place(main_tensor.place())) {
#if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL)
      std::vector<void *> buffers;
      buffers.reserve(member_->places_.size());
      size_t numel = main_tensor.numel();
      ncclDataType_t data_type = platform::ToNCCLDataType(main_tensor.type());
      for (size_t i = 0; i < member_->places_.size(); ++i) {
        auto place = member_->places_[i];
        void *buffer;

        if (i == 0 && trainer_id == 0) {
          buffer = const_cast<void *>(main_tensor.data<void>());
        } else {
          auto local_scope = member_->local_scopes_[i];
          auto *t = local_scope->Var(var)->GetMutable<LoDTensor>();
          t->Resize(dims);
          buffer = t->mutable_data(place, main_tensor.type());
        }
        buffers.push_back(buffer);
      }

      PADDLE_ENFORCE_EQ(member_->places_.size(), buffers.size(),
                        platform::errors::PreconditionNotMet(
                            "variables' buffer size to bcast is %d, which is "
                            "NOT equal to places size %d",
                            buffers.size(), member_->places_.size()));
      {
        auto *nccl_ctxs = member_->nccl_ctxs_->DefaultFlatCtx();
        platform::NCCLGroupGuard guard;
        for (size_t i = 0; i < member_->places_.size(); ++i) {
          auto &nccl_ctx = nccl_ctxs->at(member_->places_[i]);
          platform::dynload::ncclBcast(buffers[i], numel, data_type, 0,
                                       nccl_ctx.comm_, nccl_ctx.stream());
        }
        nccl_ctxs->WaitAll();
      }
#endif
    } else if (paddle::platform::is_xpu_place(main_tensor.place())) {
#if defined(PADDLE_WITH_XPU_BKCL)
      std::vector<void *> buffers;
      buffers.reserve(member_->places_.size());
      size_t numel = main_tensor.numel();
      // TODO(liuyuhui): BKCL only supports parameters of float type. Other
      // parameters need to be reinterpreted as float before broadcasting,
      // but since broadcast is a type-agnostic byte copy, this does not
      // affect correctness.
      BKCLDataType data_type = BKCL_FLOAT;
      // BKCLDataType data_type = platform::ToBKCLDataType(main_tensor.type());
      for (size_t i = 0; i < member_->places_.size(); ++i) {
        auto place = member_->places_[i];
        void *buffer;

        if (i == 0 && trainer_id == 0) {
          buffer = const_cast<void *>(main_tensor.data<void>());
        } else {
          auto local_scope = member_->local_scopes_[i];
          auto *t = local_scope->Var(var)->GetMutable<LoDTensor>();
          t->Resize(dims);
          buffer = t->mutable_data(place, main_tensor.type());
        }
        buffers.push_back(buffer);
      }

      PADDLE_ENFORCE_EQ(member_->places_.size(), buffers.size(),
                        platform::errors::PreconditionNotMet(
                            "variables' buffer size to bcast is %d, which is "
                            "NOT equal to places size %d",
                            buffers.size(), member_->places_.size()));
      {
        auto *bkcl_ctxs = member_->bkcl_ctxs_->DefaultFlatCtx();

        PADDLE_ENFORCE_EQ(
            bkcl_group_start(), BKCL_SUCCESS,
            platform::errors::Unavailable("bkcl_group_start failed"));
        for (size_t i = 0; i < member_->places_.size(); ++i) {
          auto &bkcl_ctx = bkcl_ctxs->at(member_->places_[i]);
          auto broadcast_numel = numel;
          if (main_tensor.type() == framework::proto::VarType::INT64) {
            broadcast_numel *= 2;
          }
          PADDLE_ENFORCE_EQ(
              bkcl_broadcast(bkcl_ctx.comm(), buffers[i], buffers[i],
                             broadcast_numel, data_type, 0, NULL),
              BKCL_SUCCESS,
              platform::errors::Unavailable("bkcl_broadcast failed"));
        }
        PADDLE_ENFORCE_EQ(
            bkcl_group_end(), BKCL_SUCCESS,
            platform::errors::Unavailable("bkcl_group_end failed"));
      }
#else
      PADDLE_THROW(
          platform::errors::PreconditionNotMet("Not compiled with BKCL."));
#endif
    } else {
      platform::CPUPlace cpu;
      for (size_t i = 1; i < member_->places_.size(); ++i) {
        auto local_scope = member_->local_scopes_[i];
        auto *t = local_scope->Var(var)->GetMutable<LoDTensor>();

        auto copy_memory = [&] {
          t->Resize(dims);
          t->mutable_data(cpu, main_tensor.type());
          paddle::framework::TensorCopy(main_tensor, cpu, t);
        };

        auto share_memory = [&] { t->ShareDataWith(main_tensor); };

        // FIXME(zcd): LR_DECAY_COUNTER should not be shared. This is a hot fix.
        if (member_->build_strategy_.async_mode_) {
          share_memory();
        } else if (member_->use_all_reduce_ ||
                   member_->IsUseCUDA(member_->use_device_) ||
                   var == "@LR_DECAY_COUNTER@") {
          copy_memory();
        } else {
          share_memory();
        }
      }
    }
  }
}

FetchResultType ParallelExecutor::Run(
    const std::vector<std::string> &fetch_tensors, bool return_merged) {
  VLOG(3) << "enter ParallelExecutor Run";
#ifdef WITH_GPERFTOOLS
  if (gProfileStarted) {
    ProfilerFlush();
  }
#endif

  platform::RecordBlock b(0);

  ResetHasFeedGuard reset_has_feed_guard(member_);

  ir::SkipMemOptVarsGuard guard(&(member_->mem_opt_var_infos_), fetch_tensors,
                                member_->HasGarbageCollectors());

  VLOG(3) << "ParallelExecutor begin to run member_->executor_->Run";
  auto fetch_data = member_->executor_->Run(fetch_tensors, return_merged);
  return fetch_data;
}

void ParallelExecutor::FeedTensorsIntoLocalScopes(
    const std::vector<std::unordered_map<std::string, LoDTensor>> &tensors) {
  if (!member_->AllowPartialFeed()) {
    PADDLE_ENFORCE_EQ(tensors.size(), member_->local_scopes_.size(),
                      platform::errors::Unimplemented(
                          "The feed data number %d does not match the device "
                          "number %d. If you are using DataLoader to feed "
                          "data, this may be because you set drop_last=False "
                          "in training network. Currently, drop_last=False for "
                          "DataLoader is not supported for training network. "
                          "Please set drop_last=True when defining DataLoader.",
                          tensors.size(), member_->local_scopes_.size()));
  } else {
    PADDLE_ENFORCE_GE(member_->local_scopes_.size(), tensors.size(),
                      platform::errors::InvalidArgument(
                          "The feed tensor number exceeds the device number"));
  }

  size_t feed_num = 0;
  for (size_t i = 0; i < tensors.size(); ++i) {
    auto &map = tensors[i];
    if (map.empty()) {
      continue;
    }

    member_->SetHasFeed(i);
    ++feed_num;
    for (auto &pair : map) {
      bool is_persistable = member_->IsPersistable(pair.first);
      if (!is_persistable) {
        member_->SetSkipMemoryReuse(i, pair.first);
      }
      auto *feed_scope = is_persistable ? member_->local_scopes_[i]
                                        : member_->local_exec_scopes_[i];
      auto *feed_var = feed_scope->Var(pair.first);

      auto *trg = feed_var->GetMutable<LoDTensor>();
      trg->ShareDataWith(pair.second);
      trg->set_lod(pair.second.lod());
    }
  }

  if (!member_->AllowPartialFeed()) {
    PADDLE_ENFORCE_EQ(feed_num, member_->local_scopes_.size(),
                      platform::errors::Unimplemented(
                          "The feed data number %d does not match the device "
                          "number %d. If you are using DataLoader to feed "
                          "data, this may be because you set drop_last=False "
                          "in training network. Currently, drop_last=False for "
                          "DataLoader is not supported for training network. "
                          "Please set drop_last=True when defining DataLoader.",
                          feed_num, member_->local_scopes_.size()));
  }
}

void ParallelExecutor::FeedAndSplitTensorIntoLocalScopes(
    const std::unordered_map<std::string, LoDTensor> &tensors) {
  size_t num_places = member_->places_.size();
  bool allow_partial_feed = member_->AllowPartialFeed();

  size_t persistable_feed_len = -1UL;
  size_t non_persistable_feed_len = -1UL;

  for (auto &pair : tensors) {
    bool is_persistable = member_->IsPersistable(pair.first);
    VLOG(3) << "Split " << (is_persistable ? "persistable" : "non-persistable")
            << " data (" << pair.first << "), dim:" << pair.second.dims()
            << ", place: " << pair.second.place();
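    // One piece per place; fewer pieces than places is tolerated only for
    // persistable variables or when partial feed is enabled.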
    auto lod_tensors = pair.second.SplitLoDTensor(member_->places_);
    bool is_cpu_place = platform::is_cpu_place(member_->places_.front());
    if (!is_persistable && num_places != lod_tensors.size() &&
        !allow_partial_feed) {
      auto error_info = string::Sprintf(
          "The number(%d) of samples[%s] of the current batch is less than "
          "the count(%d) of devices(%s), which is currently not allowed. ",
          lod_tensors.size(), pair.first, num_places,
          (is_cpu_place ? "CPU" : "GPU"));
      if (is_cpu_place) {
        error_info +=
            "You should set the environment variable CPU_NUM in the system "
            "to determine the number of devices you need.";
      }
      PADDLE_THROW(platform::errors::PreconditionNotMet(error_info));
    } else if (is_persistable) {
      if (lod_tensors.size() == 1) {
        lod_tensors.reserve(num_places);
        auto &tensor = lod_tensors.front();
        PADDLE_ENFORCE_EQ(
            tensor.dims(), pair.second.dims(),
            platform::errors::PreconditionNotMet("The dims do not match."));
        PADDLE_ENFORCE_EQ(
            tensor.place(), member_->places_.at(0),
            platform::errors::PreconditionNotMet("The place does not match."));
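        // Replicate the single fed sample onto every remaining place;
        // reserve() above avoids reallocation during the copies.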
        for (size_t i = 1; i < num_places; ++i) {
          lod_tensors.emplace_back();
          auto &tmp = lod_tensors.back();
          framework::TensorCopy(pair.second, member_->places_.at(i), &tmp);
        }
      }
      if (lod_tensors.size() != num_places && !allow_partial_feed) {
        auto error_info = string::Sprintf(
            "The number(%d) of samples[%s] of the current batch does not "
            "match the count(%d) of devices(%s). Because %s is a persistable "
            "variable, you can feed just one sample; in that case, the input "
            "sample will be copied into %d copies and sent to the different "
            "places separately. If you need each place to hold a different "
            "value, you should feed %d samples.",
            lod_tensors.size(), pair.first, num_places,
            (is_cpu_place ? "CPU" : "GPU"), pair.first, num_places, num_places);
        PADDLE_THROW(platform::errors::PreconditionNotMet(error_info));
      }
    }

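    // Under partial feed, all variables of the same kind (persistable or
    // non-persistable) must be fed on the same number of places; track both
    // counts and verify them on every iteration.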
    if (allow_partial_feed) {
      if (is_persistable) {
        if (persistable_feed_len == -1UL) {
          persistable_feed_len = lod_tensors.size();
        } else {
          PADDLE_ENFORCE_EQ(
              persistable_feed_len, lod_tensors.size(),
              platform::errors::InvalidArgument(
                  "The feeded number of different persistable variables "
                  "should be the same"));
        }
      } else {
        if (non_persistable_feed_len == -1UL) {
          non_persistable_feed_len = lod_tensors.size();
        } else {
          PADDLE_ENFORCE_EQ(
              non_persistable_feed_len, lod_tensors.size(),
              platform::errors::InvalidArgument(
                  "The feeded number of different non-persistable variables "
                  "should be the same"));
        }
      }
    }

    for (size_t j = 0; j < lod_tensors.size(); ++j) {
      auto *feed_scope = is_persistable ? member_->local_scopes_[j]
                                        : member_->local_exec_scopes_[j];
      auto *feed_var = feed_scope->Var(pair.first);

      auto t = feed_var->GetMutable<LoDTensor>();
      t->ShareDataWith(lod_tensors[j]);
      t->set_lod(lod_tensors[j].lod());
    }
  }

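  // Persistable variables may be replicated onto more places than the
  // non-persistable ones were fed to, but never fewer; then mark the devices
  // that actually received data.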
  if (allow_partial_feed && persistable_feed_len != -1UL &&
      non_persistable_feed_len != -1UL) {
    VLOG(10) << "Persistable len " << persistable_feed_len;
    VLOG(10) << "Non persistable len " << non_persistable_feed_len;
    PADDLE_ENFORCE_GE(persistable_feed_len, non_persistable_feed_len,
                      platform::errors::InvalidArgument(
                          "The feeded number of persistable variables should "
                          "not be less than non-persistable variables"));
  }

  if (non_persistable_feed_len != -1UL) {
    for (size_t i = 0; i < non_persistable_feed_len; ++i) {
      member_->SetHasFeed(i);
    }
  }
}

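// Wait for all in-flight work on every place before tearing down the
// executor state.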
ParallelExecutor::~ParallelExecutor() {
  for (auto &p : member_->places_) {
    platform::DeviceContextPool::Instance().Get(p)->Wait();
  }
  delete member_;
}

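// ParallelGraph mode runs an independent graph per device. It is only
// considered when FLAGS_enable_parallel_graph is set, and is disabled when
// the graph contains features it cannot handle yet (sparse updates,
// send/recv ops) or the platform/strategy does not support it.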
bool ParallelExecutor::EnableParallelGraphExecution(
    const ir::Graph &graph, const ExecutionStrategy &exec_strategy,
    const BuildStrategy &build_strategy) const {
  if (!FLAGS_enable_parallel_graph) {
    return false;
  }

  bool enable_parallel_graph = true;

  for (ir::Node *node : graph.Nodes()) {
    if (node->IsVar() && node->Var()) {
      // TODO(Yancey1989): support sparse update in ParallelGraph mode.
      if (node->Var()->GetType() == proto::VarType::SELECTED_ROWS) {
        enable_parallel_graph = false;
        break;
      }
    } else if (node->IsOp() && node->Op()) {
      // TODO(Yancey1989): support pserver mode
      if (node->Op()->Type() == "send" || node->Op()->Type() == "recv") {
        enable_parallel_graph = false;
        break;
      }
    }
  }

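  // When not doing all-reduce on CUDA, ParallelGraph cannot be combined with
  // sequential execution or the experimental executor.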
  if (!member_->use_all_reduce_ || !member_->IsUseCUDA(member_->use_device_)) {
    if (build_strategy.enable_sequential_execution_ ||
        exec_strategy.type_ == ExecutionStrategy::ExecutorType::kExperimental) {
      enable_parallel_graph = false;
    }
  }

#ifdef WIN32
  VLOG(1) << "Windows does not support parallel graph execution; "
             "enable_parallel_graph is forced to false.";
  enable_parallel_graph = false;
#endif

  return enable_parallel_graph;
}

const ir::Graph &ParallelExecutor::Graph() const {
  return member_->executor_->Graph();
}

}  // namespace framework
}  // namespace paddle

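// Reference the pass registrars so the passes used above are linked into the
// final binary.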
USE_PASS(reference_count_pass);
USE_PASS(eager_deletion_pass);
USE_PASS(buffer_shared_inplace_pass);
USE_PASS(buffer_shared_cross_op_memory_reuse_pass);
USE_PASS(inplace_addto_op_pass);