/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/framework/parallel_executor.h"
#include <algorithm>
#include <string>
#include <tuple>
#include <vector>
#include "paddle/fluid/framework/ir/graph_helper.h"

#include "paddle/fluid/framework/ir/graph.h"

#include "paddle/fluid/framework/details/all_reduce_deps_pass.h"
#include "paddle/fluid/framework/details/async_ssa_graph_executor.h"
#include "paddle/fluid/framework/details/fast_threaded_ssa_graph_executor.h"
#include "paddle/fluid/framework/details/multi_devices_helper.h"
#include "paddle/fluid/framework/details/parallel_ssa_graph_executor.h"
#include "paddle/fluid/framework/details/reference_count_pass_helper.h"
#include "paddle/fluid/framework/details/scope_buffered_ssa_graph_executor.h"
#include "paddle/fluid/framework/details/threaded_ssa_graph_executor.h"
#include "paddle/fluid/platform/profiler.h"

#ifdef WITH_GPERFTOOLS
#include "gperftools/profiler.h"
#endif
DEFINE_string(pe_profile_fname, "",
              "Profiler filename for PE, which generated by gperftools."
              "Only valid when compiled `WITH_PRIFILER=ON`. Empty if disable.");
DEFINE_bool(enable_parallel_graph, false,
            "Force disable parallel graph execution mode if set false.");

namespace paddle {
namespace framework {

static std::once_flag gProfileOnce;
#ifdef WITH_GPERFTOOLS
static bool gProfileStarted = false;
#endif
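
// ParallelExecutorPrivate owns the per-executor state: the devices (places),
// the per-device local scopes, the underlying SSA graph executor, the NCCL
// context map when built with CUDA, and the reference-count maps plus garbage
// collectors used for eager deletion of intermediate variables.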
class ParallelExecutorPrivate {
 public:
  explicit ParallelExecutorPrivate(const std::vector<platform::Place> &places)
      : places_(places) {
    if (!FLAGS_pe_profile_fname.empty()) {
      std::call_once(gProfileOnce, [] {
#ifdef WITH_GPERFTOOLS
        ProfilerStart(FLAGS_pe_profile_fname.c_str());
        gProfileStarted = true;
#else
        LOG(WARNING) << "Paddle is not compiled with gperftools. "
                        "FLAGS_pe_profile_fname will be ignored";
#endif
      });
    }
  }

  ~ParallelExecutorPrivate() {
    if (own_local_scope_) {
      for (size_t i = 1; i < local_scopes_.size(); ++i) {
        // Skip the first scope, since it is the global scope.
        Scope *local_scope = local_scopes_[i];
        if (global_scope_->HasKid(local_scope)) {
          global_scope_->DeleteScope(local_scope);
        }
      }
    }
  }

  std::unique_ptr<ir::Graph> PrepareGCAndRefCnts(
      std::unique_ptr<ir::Graph> graph, size_t max_memory_size);

  inline bool HasGarbageCollectors() const { return !gcs_.empty(); }

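  // Restore the runtime reference counts from the static counts collected at
  // construction time, then drop the entries for the fetch targets so that
  // fetched variables are never eagerly deleted during this iteration.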
  void ResetRuntimeReferenceCount(const std::vector<std::string> &fetch_tensors,
                                  const std::string &fetched_var_name) {
    for (size_t i = 0; i < runtime_ref_cnts_.size(); ++i) {
      for (auto &pair : global_ref_cnts_[i]) {
        runtime_ref_cnts_[i][pair.first] = pair.second;
      }

      for (auto &fetch_name : fetch_tensors) {
        runtime_ref_cnts_[i].erase(fetch_name);
      }
      runtime_ref_cnts_[i].erase(fetched_var_name);
    }
  }

  BuildStrategy build_strategy_;
  std::vector<platform::Place> places_;
  std::vector<Scope *> local_scopes_;
  Scope *global_scope_;  // not owned
  std::unique_ptr<details::SSAGraphExecutor> executor_;

#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32)
  std::unique_ptr<platform::NCCLContextMap> nccl_ctxs_;
#endif
  bool own_local_scope_;
  bool use_cuda_;
  bool use_all_reduce_;
  size_t nranks_;

  // global_ref_cnts_ is initialized only when the ParallelExecutor is
  // constructed, and is kept unchanged afterwards.
  // Before each iteration, runtime_ref_cnts_ is reset to global_ref_cnts_.
  std::vector<details::ReferenceCountMap> global_ref_cnts_;
  std::vector<details::AtomicReferenceCountMap> runtime_ref_cnts_;
  details::GarbageCollectorMap gcs_;
};

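// Create one GarbageCollector per place (if not already present) and, when any
// collector exists, apply the reference_count_pass and eager_deletion_pass so
// intermediate variables can be freed as soon as their last user finishes.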
std::unique_ptr<ir::Graph> ParallelExecutorPrivate::PrepareGCAndRefCnts(
    std::unique_ptr<ir::Graph> graph, size_t max_memory_size) {
  for (size_t i = 0; i < places_.size(); ++i) {
    auto &place = places_[i];
    if (gcs_.count(place) > 0) {
      continue;
    }
    std::unique_ptr<GarbageCollector> gc;
#ifdef PADDLE_WITH_CUDA
    if (platform::is_gpu_place(place)) {
      if (IsFastEagerDeletionModeEnabled()) {
        gc.reset(new UnsafeFastGPUGarbageCollector(
            boost::get<platform::CUDAPlace>(place), max_memory_size));
      } else {
        gc.reset(new StreamGarbageCollector(
            boost::get<platform::CUDAPlace>(place), max_memory_size));
      }
      VLOG(10) << "Created " << i << "-th GarbageCollector at " << place;
    } else {
#endif
      if (platform::is_cpu_place(place)) {
        gc.reset(new CPUGarbageCollector(boost::get<platform::CPUPlace>(place),
                                         max_memory_size));
        VLOG(10) << "Created GarbageCollector at " << place;
      } else {
        PADDLE_THROW("Unsupported place for garbage collection");
      }
#ifdef PADDLE_WITH_CUDA
    }
#endif

    gcs_.emplace(place, std::move(gc));
  }

  if (!gcs_.empty()) {
    std::vector<details::LastLiveOpsOfVars> last_live_ops_of_vars;

    auto ref_cnt_pass =
        ir::PassRegistry::Instance().Get("reference_count_pass");
    ref_cnt_pass->SetNotOwned(details::kGlobalReferenceCount,
                              &global_ref_cnts_);
    ref_cnt_pass->SetNotOwned(details::kLastLiveOpsOfVars,
                              &last_live_ops_of_vars);
    graph = ref_cnt_pass->Apply(std::move(graph));
    VLOG(10) << "ReferenceCountPass Applied";

    auto eager_deletion_pass =
        ir::PassRegistry::Instance().Get("eager_deletion_pass");
    eager_deletion_pass->SetNotOwned(details::kRuntimeReferenceCount,
                                     &runtime_ref_cnts_);
    eager_deletion_pass->SetNotOwned(details::kGarbageCollector, &gcs_);
    eager_deletion_pass->SetNotOwned(details::kLastLiveOpsOfVars,
                                     &last_live_ops_of_vars);
    eager_deletion_pass->SetNotOwned(details::kAllPlaces, &places_);
    graph = eager_deletion_pass->Apply(std::move(graph));
    VLOG(10) << "EagerDeletionPass Applied";
  }

  return graph;
}

std::vector<Scope *> &ParallelExecutor::GetLocalScopes() {
  return member_->local_scopes_;
}

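// Construction happens in three steps (see the numbered comments below):
// (1) create or adopt the per-device local scopes and broadcast parameters,
// (2) apply the BuildStrategy passes to turn the program graph into an SSA
//     graph (one graph per device in async mode), and
// (3) pick a concrete SSAGraphExecutor (Async / ParallelGraph / Threaded /
//     FastThreaded), normally wrapped in a ScopeBufferedSSAGraphExecutor.
//
// Rough usage sketch (illustrative only; in practice construction is driven
// by the Python-side compiler/executor layer, and all names below are
// placeholders):
//
//   ParallelExecutor pe(places, bcast_vars, loss_var_name, scope,
//                       /*local_scopes=*/{}, exec_strategy, build_strategy,
//                       graph);
//   pe.Run(fetch_tensors, fetched_var_name);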
ParallelExecutor::ParallelExecutor(
    const std::vector<platform::Place> &places,
    const std::unordered_set<std::string> &bcast_vars,
    const std::string &loss_var_name, Scope *scope,
    const std::vector<Scope *> &local_scopes,
    const ExecutionStrategy &exec_strategy, const BuildStrategy &build_strategy,
    ir::Graph *graph)
    : member_(new ParallelExecutorPrivate(places)) {
  member_->global_scope_ = scope;
  member_->use_cuda_ = exec_strategy.use_cuda_;
  member_->build_strategy_ = build_strategy;
  member_->use_all_reduce_ =
      build_strategy.reduce_ == BuildStrategy::ReduceStrategy::kAllReduce;
  member_->nranks_ = build_strategy.num_trainers_ * places.size();
  if (!member_->use_all_reduce_) {
    PADDLE_ENFORCE(places.size() > 1,
                   "If you set build_strategy.reduce with 'Reduce',"
                   "the number of places must be greater than 1.");
  }

  // Step 1. Bcast the bcast_vars to devs.
  // Create local scopes
  if (local_scopes.empty()) {
    member_->own_local_scope_ = true;
    member_->local_scopes_.emplace_back(member_->global_scope_);
    for (size_t i = 1; i < member_->places_.size(); ++i) {
      member_->local_scopes_.emplace_back(&scope->NewScope());
    }
  } else {
    member_->own_local_scope_ = false;
    PADDLE_ENFORCE_EQ(member_->places_.size(), local_scopes.size());
    for (size_t i = 0; i < member_->places_.size(); ++i) {
      member_->local_scopes_.emplace_back(&local_scopes[i]->NewScope());
    }
  }

  std::vector<ir::Graph *> graphs;
  if (build_strategy.async_mode_) {
    PADDLE_ENFORCE(!member_->use_cuda_,
                   "gpu mode does not support async_mode_ now!");
    graphs.push_back(graph);
    for (int i = 1; i < places.size(); ++i) {
      auto *tmp_graph = new ir::Graph(graph->OriginProgram());
      async_graphs_.emplace_back(tmp_graph);
      graphs.push_back(tmp_graph);
    }
  }

  std::unique_ptr<ir::Graph> temp_owned_graph(graph);

  // FIXME(Yancey1989): parallel graph mode gets better performance
  // in GPU allreduce distributed training. Need an elegant way to
  // choose the execution strategy.
  build_strategy.enable_parallel_graph_ = EnableParallelGraphExecution(
      *temp_owned_graph, exec_strategy, build_strategy);
  if (build_strategy.enable_parallel_graph_)
    VLOG(0) << "The Executor would execute the graph by ParallelGraph "
               "Execution which can get better performance, "
            << "you can force it off by env FLAGS_enable_parallel_graph=0";

  if (member_->use_cuda_) {
// Bcast Parameters to all GPUs
#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32)
    ncclUniqueId *nccl_id = nullptr;
    // gen_nccl_id operator can broadcast the ncclUniqueId for nccl2 collective
    // distributed training
    auto *nccl_id_var = scope->FindVar(NCCL_ID_VARNAME);
    if (nccl_id_var != nullptr) {
      nccl_id = nccl_id_var->GetMutable<ncclUniqueId>();
    }
    if (build_strategy.enable_parallel_graph_ && member_->nranks_ > 1UL) {
      if (nccl_id == nullptr) {
        local_nccl_id_.reset(new ncclUniqueId());
        platform::dynload::ncclGetUniqueId(local_nccl_id_.get());
        nccl_id = local_nccl_id_.get();
      }
    }

    member_->nccl_ctxs_.reset(new platform::NCCLContextMap(
        member_->places_, nccl_id, build_strategy.num_trainers_,
        build_strategy.trainer_id_));
#else
    PADDLE_THROW("Not compiled with CUDA");
#endif
  }
  if (member_->local_scopes_.size() != 1 && local_scopes.empty()) {
    BCastParamsToDevices(bcast_vars);
  }
  // Startup Program has been run. All local scopes have correct parameters.

  // Step 2. Convert main_program to SSA form and dependency graph. Also, insert
  // ncclOp
  std::vector<ir::Graph *> async_graphs(places.size());
#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32)
  if (build_strategy.async_mode_ && !build_strategy.is_distribution_) {
    VLOG(3) << "use local async mode";
    temp_owned_graph =
        build_strategy.Apply(std::move(temp_owned_graph), {member_->places_[0]},
                             loss_var_name, {member_->local_scopes_[0]}, 1,
                             member_->use_cuda_, member_->nccl_ctxs_.get());
    for (int i = 1; i < member_->places_.size(); ++i) {
      std::unique_ptr<ir::Graph> temp_graph(graphs[i]);
      temp_graph =
          build_strategy.Apply(std::move(temp_graph), {member_->places_[i]},
                               loss_var_name, {member_->local_scopes_[i]}, 1,
                               member_->use_cuda_, member_->nccl_ctxs_.get());
      async_graphs[i] = temp_graph.release();
    }
  } else {
    temp_owned_graph = build_strategy.Apply(
        std::move(temp_owned_graph), member_->places_, loss_var_name,
        member_->local_scopes_, member_->nranks_, member_->use_cuda_,
        member_->nccl_ctxs_.get());
  }
#else
  if (build_strategy.async_mode_ && !build_strategy.is_distribution_) {
    VLOG(3) << "use local async mode";
    temp_owned_graph = build_strategy.Apply(
        std::move(temp_owned_graph), {member_->places_[0]}, loss_var_name,
        {member_->local_scopes_[0]}, 1, member_->use_cuda_);
    for (int i = 1; i < member_->places_.size(); ++i) {
      std::unique_ptr<ir::Graph> temp_graph(graphs[i]);
      temp_graph = build_strategy.Apply(
          std::move(temp_graph), {member_->places_[i]}, loss_var_name,
          {member_->local_scopes_[i]}, 1, member_->use_cuda_);
      async_graphs[i] = temp_graph.release();
    }
  } else {
    temp_owned_graph = build_strategy.Apply(
        std::move(temp_owned_graph), member_->places_, loss_var_name,
        member_->local_scopes_, member_->nranks_, member_->use_cuda_);
  }

#endif
  auto max_memory_size = GetEagerDeletionThreshold();
  VLOG(10) << "Eager Deletion Threshold "
           << static_cast<float>(max_memory_size) / (1 << 30);
  if (max_memory_size >= 0) {
    graph = member_
                ->PrepareGCAndRefCnts(std::move(temp_owned_graph),
                                      static_cast<size_t>(max_memory_size))
                .release();
  } else {
    graph = temp_owned_graph.release();
  }

  async_graphs[0] = graph;

  // Step 3. Create vars in each scope. Passes may also create new vars.
  //         skip control vars and empty vars
  std::vector<details::VariableInfo> var_infos;
  for (auto &node : graph->Nodes()) {
    if (node->IsVar() && !node->IsCtrlVar() && node->Var()) {
      var_infos.emplace_back();
      var_infos.back().name_ = node->Var()->Name();
      var_infos.back().type_ = node->Var()->GetType();
      var_infos.back().persistable_ = node->Var()->Persistable();
    }
  }

  // If the loss_var_name is given, the graph should contain only one sub-graph.
  if (loss_var_name.size()) {
    size_t graph_num = ir::GraphNum(*graph);
    if (graph_num > 1) {
      LOG(WARNING)
          << "The graph should have only one sub-graph, "
             "but the current graph has "
          << ir::GraphNum(*graph)
          << " sub_graphs. If you want to see the nodes of the "
             "sub_graphs, you should use 'FLAGS_print_sub_graph_dir' "
             "to specify the output dir. NOTE: if you are not doing "
             "training, please don't pass loss_var_name.";
    }
  }

  if (build_strategy.async_mode_ && !build_strategy.is_distribution_) {
    VLOG(3) << "use AsyncSSAGraphExecutor";
    member_->executor_.reset(new details::AsyncSSAGraphExecutor(
        exec_strategy, member_->local_scopes_, member_->places_, async_graphs));
  } else if (build_strategy.enable_parallel_graph_) {
    VLOG(3) << "use ParallelSSAGraphExecutor";
#ifdef PADDLE_WITH_CUDA
    // TODO(Yancey1989): Remove passing in the main_program when
    // allreduce_seq_pass doesn't need it as the attr.
    member_->executor_.reset(new details::ParallelSSAGraphExecutor(
        exec_strategy, member_->local_scopes_, member_->places_, graph));
#else
    PADDLE_THROW(
        "Paddle should be compiled with CUDA for ParallelGraph Execution.");
#endif
  } else {
    if (exec_strategy.type_ == ExecutionStrategy::kDefault) {
      VLOG(3) << "use ThreadedSSAGraphExecutor";
      member_->executor_.reset(new details::ThreadedSSAGraphExecutor(
          exec_strategy, member_->local_scopes_, member_->places_, graph));
    } else {
      VLOG(3) << "use FastThreadedSSAGraphExecutor";
      member_->executor_.reset(new details::FastThreadedSSAGraphExecutor(
          exec_strategy, member_->local_scopes_, member_->places_, graph));
    }
  }

  VLOG(3) << "use ScopeBufferedSSAGraphExecutor";
  if (!build_strategy.async_mode_) {
    member_->executor_.reset(new details::ScopeBufferedSSAGraphExecutor(
        exec_strategy, member_->local_scopes_, std::move(var_infos),
        member_->places_, std::move(member_->executor_)));
  }
}

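// Broadcast the already-initialized parameters from the scope of device 0 to
// every other local scope: ncclBcast is used for GPU tensors, while on CPU the
// tensor is either copied or shared, depending on the strategy checked below.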
void ParallelExecutor::BCastParamsToDevices(
    const std::unordered_set<std::string> &vars) const {
  VLOG(3) << "BCastParamsToDevices";
  // This is the initializing bcast; all vars are bcast from device(0).
  for (auto &var : vars) {
    framework::Variable *main_var = member_->local_scopes_[0]->FindVar(var);
    if (main_var == nullptr || !main_var->IsType<LoDTensor>()) {
      continue;
    }

    auto &main_tensor = main_var->Get<LoDTensor>();
    if (!main_tensor.IsInitialized()) {
      VLOG(3) << "var " << var << " is not initialized, skip!";
      continue;
    }
    auto &dims = main_tensor.dims();
    if (paddle::platform::is_gpu_place(main_tensor.place())) {
#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32)
      std::vector<void *> buffers;
      buffers.reserve(member_->places_.size());
      size_t numel = main_tensor.numel();
      ncclDataType_t data_type = platform::ToNCCLDataType(main_tensor.type());
      for (size_t i = 0; i < member_->places_.size(); ++i) {
        auto place = member_->places_[i];
        void *buffer;

        if (i == 0) {
          buffer = const_cast<void *>(main_tensor.data<void>());
        } else {
          auto local_scope = member_->local_scopes_[i];
          auto *t = local_scope->Var(var)->GetMutable<LoDTensor>();
          t->Resize(dims);
          buffer = t->mutable_data(place, main_tensor.type());
        }
        buffers.push_back(buffer);
      }

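      // Issue all broadcasts from device 0 inside a single NCCL group so the
      // per-device ncclBcast calls are batched together.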
      PADDLE_ENFORCE_EQ(member_->places_.size(), buffers.size(),
                        "variables' buffer size to bcast NOT equal to places");
      {
        platform::NCCLGroupGuard guard;
        for (size_t i = 0; i < member_->places_.size(); ++i) {
          auto &nccl_ctx = member_->nccl_ctxs_->at(member_->places_[i]);
          platform::dynload::ncclBcast(buffers[i], numel, data_type, 0,
                                       nccl_ctx.comm_, nccl_ctx.stream());
        }
        member_->nccl_ctxs_->WaitAll();
      }
#else
      PADDLE_THROW("Not compiled with CUDA");
#endif
    } else {
      platform::CPUPlace cpu;
      for (size_t i = 1; i < member_->places_.size(); ++i) {
        auto local_scope = member_->local_scopes_[i];
        auto *t = local_scope->Var(var)->GetMutable<LoDTensor>();

        auto copy_memory = [&] {
          t->Resize(dims);
          t->mutable_data(cpu, main_tensor.type());
          paddle::framework::TensorCopy(main_tensor, cpu, t);
        };

        auto share_memory = [&] { t->ShareDataWith(main_tensor); };

        // FIXME(zcd): LR_DECAY_COUNTER should not be shared. This is a hot fix.
        if (member_->build_strategy_.async_mode_) {
          share_memory();
        } else if (member_->use_all_reduce_ || member_->use_cuda_ ||
                   var == "@LR_DECAY_COUNTER@") {
          copy_memory();
        } else {
          share_memory();
        }
      }
    }
  }
}

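// Run one iteration: flush the gperftools profiler if it is active, reset the
// runtime reference counts used by eager deletion, execute the SSA graph, and
// store the fetch results in `fetched_var_name` inside the global scope.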
void ParallelExecutor::Run(const std::vector<std::string> &fetch_tensors,
                           const std::string &fetched_var_name) {
#ifdef WITH_GPERFTOOLS
  if (gProfileStarted) {
    ProfilerFlush();
  }
#endif

  platform::RecordBlock b(0);
  if (member_->HasGarbageCollectors()) {
    member_->ResetRuntimeReferenceCount(fetch_tensors, fetched_var_name);
  }
  auto fetch_data = member_->executor_->Run(fetch_tensors);
  *member_->global_scope_->Var(fetched_var_name)->GetMutable<FeedFetchList>() =
      fetch_data;
}

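// Feed helpers: FeedTensorsIntoLocalScopes expects one tensor map per device,
// while FeedAndSplitTensorIntoLocalScopes splits each LoDTensor across all
// devices along the batch dimension.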
void ParallelExecutor::FeedTensorsIntoLocalScopes(
    const std::vector<std::unordered_map<std::string, LoDTensor>> &tensors) {
  PADDLE_ENFORCE_EQ(member_->local_scopes_.size(), tensors.size());

  for (size_t i = 0; i < tensors.size(); ++i) {
    auto &map = tensors[i];
    auto *scope = member_->local_scopes_[i];
    for (auto &pair : map) {
      auto *trg = scope->Var(pair.first)->GetMutable<LoDTensor>();
      trg->ShareDataWith(pair.second);
      trg->set_lod(pair.second.lod());
    }
  }
}

void ParallelExecutor::FeedAndSplitTensorIntoLocalScopes(
    const std::unordered_map<std::string, LoDTensor> &tensors) {
  for (auto pair : tensors) {
    auto lod_tensors = pair.second.SplitLoDTensor(member_->places_);
    PADDLE_ENFORCE_EQ(
        member_->places_.size(), lod_tensors.size(),
        "The number of samples in the current batch is less than the count of "
        "devices; currently this is not allowed. (%d vs %d)",
        member_->places_.size(), lod_tensors.size());
    for (size_t j = 0; j < member_->places_.size(); ++j) {
      // TODO(panxy0718): Do I need to delete this var?
      auto t =
          member_->local_scopes_[j]->Var(pair.first)->GetMutable<LoDTensor>();
      t->ShareDataWith(lod_tensors[j]);
      t->set_lod(lod_tensors[j].lod());
    }
  }
}

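// Wait until every device has drained its pending work before tearing down
// the local scopes and executor owned by member_.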
ParallelExecutor::~ParallelExecutor() {
  for (auto &p : member_->places_) {
    platform::DeviceContextPool::Instance().Get(p)->Wait();
  }
  delete member_;
}

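// Heuristic used at construction time: ParallelGraph execution is considered
// only if FLAGS_enable_parallel_graph is set, and it is ruled out by
// SELECTED_ROWS variables (sparse update) and send/recv ops (pserver mode);
// in addition, when not running all-reduce on CUDA, sequential execution or
// the experimental executor type disables it.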
bool ParallelExecutor::EnableParallelGraphExecution(
    const ir::Graph &graph, const ExecutionStrategy &exec_strategy,
    const BuildStrategy &build_strategy) const {
  if (!FLAGS_enable_parallel_graph) return false;

  bool enable_parallel_graph = true;

  for (ir::Node *node : graph.Nodes()) {
    if (node->IsVar() && node->Var()) {
      // TODO(Yancey1989): support sparse update in ParallelGraph mode.
      if (node->Var()->GetType() == proto::VarType::SELECTED_ROWS) {
        enable_parallel_graph = false;
        break;
      }
    } else if (node->IsOp() && node->Op()) {
      // TODO(Yancey1989): support pserver mode
      if (node->Op()->Type() == "send" || node->Op()->Type() == "recv") {
        enable_parallel_graph = false;
        break;
      }
    }
  }

  if (!member_->use_all_reduce_ || !member_->use_cuda_)
    if (build_strategy.enable_sequential_execution_ ||
        exec_strategy.type_ == ExecutionStrategy::ExecutorType::kExperimental)
      enable_parallel_graph = false;

  return enable_parallel_graph;
}

}  // namespace framework
}  // namespace paddle

USE_PASS(reference_count_pass);
USE_PASS(eager_deletion_pass);