/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/framework/parallel_executor.h"
#include <string>
#include <tuple>
#include <vector>
#include "paddle/fluid/framework/ir/graph_helper.h"

#include "paddle/fluid/framework/ir/graph.h"

#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32)
#include "paddle/fluid/platform/nccl_helper.h"
#endif

#include "paddle/fluid/framework/details/fast_threaded_ssa_graph_executor.h"
#include "paddle/fluid/framework/details/multi_devices_helper.h"
#include "paddle/fluid/framework/details/reference_count_pass_helper.h"
#include "paddle/fluid/framework/details/scope_buffered_ssa_graph_executor.h"
#include "paddle/fluid/framework/details/threaded_ssa_graph_executor.h"
#include "paddle/fluid/platform/profiler.h"

#ifdef WITH_GPERFTOOLS
#include "gperftools/profiler.h"
#endif
DEFINE_string(pe_profile_fname, "",
              "Profiler filename for PE, which generated by gperftools."
              "Only valid when compiled `WITH_PRIFILER=ON`. Empty if disable.");

namespace paddle {
namespace framework {

static std::once_flag gProfileOnce;
#ifdef WITH_GPERFTOOLS
static bool gProfileStarted = false;
#endif
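// ParallelExecutorPrivate holds the executor's internal state: the list of
// places (devices), the per-device local scopes, the underlying SSA graph
// executor, and the bookkeeping used for reference counting and garbage
// collection.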
class ParallelExecutorPrivate {
 public:
  explicit ParallelExecutorPrivate(const std::vector<platform::Place> &places)
      : places_(places) {
    if (!FLAGS_pe_profile_fname.empty()) {
      std::call_once(gProfileOnce, [] {
#ifdef WITH_GPERFTOOLS
        ProfilerStart(FLAGS_pe_profile_fname.c_str());
        gProfileStarted = true;
#else
        LOG(WARNING) << "Paddle is not compiled with gperftools. "
                        "FLAGS_pe_profile_fname will be ignored";
#endif
      });
    }
  }

  ~ParallelExecutorPrivate() {
    if (own_local_scope_) {
      for (size_t i = 1; i < local_scopes_.size(); ++i) {
        // Skip the first scope, since it is the global scope.
        Scope *local_scope = local_scopes_[i];
        if (global_scope_->HasKid(local_scope)) {
          global_scope_->DeleteScope(local_scope);
        }
      }
    }
  }

  std::unique_ptr<ir::Graph> PrepareGCAndRefCnts(
      std::unique_ptr<ir::Graph> graph, size_t max_memory_size);

  inline bool HasGarbageCollectors() const { return !gcs_.empty(); }

  void ResetRuntimeReferenceCount(const std::vector<std::string> &fetch_tensors,
                                  const std::string &fetched_var_name) {
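    // Restore the runtime reference counts from the immutable global counts,
    // then drop the fetched variables so the garbage collector never frees
    // tensors that are about to be returned to the caller.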
    for (size_t i = 0; i < runtime_ref_cnts_.size(); ++i) {
      for (auto &pair : global_ref_cnts_[i]) {
        runtime_ref_cnts_[i][pair.first] = pair.second;
      }

      for (auto &fetch_name : fetch_tensors) {
        runtime_ref_cnts_[i].erase(fetch_name);
      }
      runtime_ref_cnts_[i].erase(fetched_var_name);
    }
  }

  std::vector<platform::Place> places_;
  std::vector<Scope *> local_scopes_;
  Scope *global_scope_;  // not owned
  std::unique_ptr<details::SSAGraphExecutor> executor_;

#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32)
  std::unique_ptr<platform::NCCLContextMap> nccl_ctxs_;
#endif
  bool own_local_scope_;
  bool use_cuda_;
  bool use_all_reduce_;

  // global_ref_cnts_ is initialized once when the ParallelExecutor is
  // constructed and then stays unchanged.
  // Before each iteration, runtime_ref_cnts_ is reset to global_ref_cnts_.
  std::vector<details::ReferenceCountMap> global_ref_cnts_;
  std::vector<details::AtomicReferenceCountMap> runtime_ref_cnts_;
  details::GarbageCollectorMap gcs_;
};

std::unique_ptr<ir::Graph> ParallelExecutorPrivate::PrepareGCAndRefCnts(
    std::unique_ptr<ir::Graph> graph, size_t max_memory_size) {
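  // Create one garbage collector per place. On CUDA places the collector
  // flavor depends on whether fast eager deletion is enabled; CPU places get
  // a plain CPUGarbageCollector.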
  for (size_t i = 0; i < places_.size(); ++i) {
    auto &place = places_[i];
    if (gcs_.count(place) > 0) {
      continue;
    }
    std::unique_ptr<GarbageCollector> gc;
#ifdef PADDLE_WITH_CUDA
    if (platform::is_gpu_place(place)) {
      if (IsFastEagerDeletionModeEnabled()) {
        gc.reset(new UnsafeFastGPUGarbageCollector(
            boost::get<platform::CUDAPlace>(place), max_memory_size));
      } else {
        gc.reset(new StreamGarbageCollector(
            boost::get<platform::CUDAPlace>(place), max_memory_size));
      }
      VLOG(10) << "Created " << i << "-th GarbageCollector at " << place;
    } else {
#endif
      if (platform::is_cpu_place(place)) {
        gc.reset(new CPUGarbageCollector(boost::get<platform::CPUPlace>(place),
                                         max_memory_size));
        VLOG(10) << "Created GarbageCollector at " << place;
      } else {
        PADDLE_THROW("Unsupported place for garbage collection");
      }
#ifdef PADDLE_WITH_CUDA
    }
#endif

    gcs_.emplace(place, std::move(gc));
  }

  if (!gcs_.empty()) {
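    // Wire the reference-count and eager-deletion passes into the graph so
    // that each variable is released as soon as its last live op finishes.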
    std::vector<details::LastLiveOpsOfVars> last_live_ops_of_vars;

    auto ref_cnt_pass =
        ir::PassRegistry::Instance().Get("reference_count_pass");
    ref_cnt_pass->SetNotOwned(details::kGlobalReferenceCount,
                              &global_ref_cnts_);
    ref_cnt_pass->SetNotOwned(details::kLastLiveOpsOfVars,
                              &last_live_ops_of_vars);
    graph = ref_cnt_pass->Apply(std::move(graph));
    VLOG(10) << "ReferenceCountPass Applied";

    auto eager_deletion_pass =
        ir::PassRegistry::Instance().Get("eager_deletion_pass");
    eager_deletion_pass->SetNotOwned(details::kRuntimeReferenceCount,
                                     &runtime_ref_cnts_);
    eager_deletion_pass->SetNotOwned(details::kGarbageCollector, &gcs_);
    eager_deletion_pass->SetNotOwned(details::kLastLiveOpsOfVars,
                                     &last_live_ops_of_vars);
    eager_deletion_pass->SetNotOwned(details::kAllPlaces, &places_);
    graph = eager_deletion_pass->Apply(std::move(graph));
    VLOG(10) << "EagerDeletionPass Applied";
  }

  return graph;
}

std::vector<Scope *> &ParallelExecutor::GetLocalScopes() {
  return member_->local_scopes_;
}

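// A minimal usage sketch (hypothetical, with abbreviated setup): construct
// one executor over all visible places, then call Run() once per iteration:
//
//   ParallelExecutor pe(places, params, bcast_vars, main_program,
//                       loss_var_name, scope, /*local_scopes=*/{},
//                       exec_strategy, build_strategy,
//                       /*num_trainers=*/1, /*trainer_id=*/0);
//   pe.Run(fetch_tensors, "fetched_vars");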
ParallelExecutor::ParallelExecutor(
    const std::vector<platform::Place> &places,
    const std::unordered_set<std::string> &params,
    const std::unordered_set<std::string> &bcast_vars,
    const ProgramDesc &main_program, const std::string &loss_var_name,
    Scope *scope, const std::vector<Scope *> &local_scopes,
    const ExecutionStrategy &exec_strategy, const BuildStrategy &build_strategy,
    size_t num_trainers, size_t trainer_id)
    : member_(new ParallelExecutorPrivate(places)) {
  member_->global_scope_ = scope;
  member_->use_cuda_ = exec_strategy.use_cuda_;
  member_->use_all_reduce_ =
      build_strategy.reduce_ == BuildStrategy::ReduceStrategy::kAllReduce;

  if (!member_->use_all_reduce_) {
    PADDLE_ENFORCE(places.size() > 1,
                   "If you set build_strategy.reduce with 'Reduce',"
                   "the number of places must be greater than 1.");
  }

  // Step 1. Bcast the params to devs.
  // Create local scopes
  if (local_scopes.empty()) {
    member_->own_local_scope_ = true;
    member_->local_scopes_.emplace_back(member_->global_scope_);
    for (size_t i = 1; i < member_->places_.size(); ++i) {
      member_->local_scopes_.emplace_back(&scope->NewScope());
    }
  } else {
    member_->own_local_scope_ = false;
    PADDLE_ENFORCE_EQ(member_->places_.size(), local_scopes.size());
    for (size_t i = 0; i < member_->places_.size(); ++i) {
      member_->local_scopes_.emplace_back(&local_scopes[i]->NewScope());
    }
  }

  if (member_->use_cuda_) {
// Broadcast parameters to all GPUs.
#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32)
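    // If an ncclUniqueId was already put into the scope (as in multi-trainer
    // runs), reuse it so that all trainers join the same NCCL communicator.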
    auto *nccl_id_var = scope->FindVar(NCCL_ID_VARNAME);
    ncclUniqueId *nccl_id = nullptr;
    if (nccl_id_var != nullptr) {
      nccl_id = nccl_id_var->GetMutable<ncclUniqueId>();
    }
    member_->nccl_ctxs_.reset(new platform::NCCLContextMap(
        member_->places_, nccl_id, num_trainers, trainer_id));
#else
    PADDLE_THROW("Not compiled with CUDA");
#endif
  }

  if (member_->local_scopes_.size() != 1 && local_scopes.empty()) {
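    // Broadcast only when the executor owns multiple freshly created local
    // scopes; user-provided scopes are assumed to hold the parameters
    // already.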
    BCastParamsToDevices(bcast_vars);
  }
// The startup program has been run; all local scopes hold correct parameters.

// Step 2. Convert main_program to SSA form and a dependency graph. Also,
// insert NCCL ops.
#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32)
  std::unique_ptr<ir::Graph> graph = build_strategy.Apply(
      main_program, member_->places_, loss_var_name, params,
      member_->local_scopes_, member_->use_cuda_, member_->nccl_ctxs_.get());
#else
  std::unique_ptr<ir::Graph> graph =
      build_strategy.Apply(main_program, member_->places_, loss_var_name,
                           params, member_->local_scopes_, member_->use_cuda_);
#endif

  auto max_memory_size = GetEagerDeletionThreshold();
  if (max_memory_size >= 0) {
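    // A non-negative threshold enables eager deletion: rewrite the graph so
    // variables are garbage-collected during execution.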
    graph = member_->PrepareGCAndRefCnts(std::move(graph),
                                         static_cast<size_t>(max_memory_size));
  }

  // Step 3. Create vars in each scope. Passes may also create new vars.
  //         skip control vars and empty vars
  std::vector<details::VariableInfo> var_infos;
  for (auto &node : graph->Nodes()) {
    if (node->IsVar() && !node->IsCtrlVar() && node->Var()) {
      var_infos.emplace_back();
      var_infos.back().name_ = node->Var()->Name();
      var_infos.back().type_ = node->Var()->GetType();
      var_infos.back().persistable_ = node->Var()->Persistable();
    }
  }
  // If loss_var_name is given, the graph should contain only one sub-graph.
  if (loss_var_name.size()) {
    size_t graph_num = ir::GraphNum(*graph);
    if (graph_num > 1) {
      LOG(WARNING)
          << "The number of graph should be only one, "
             "but the current graph has "
          << ir::GraphNum(*graph)
          << " sub_graphs. If you want to see the nodes of the "
             "sub_graphs, you should use 'FLAGS_print_sub_graph_dir' "
             "to specify the output dir. NOTES: if you not do training, "
             "please don't pass loss_var_name.";
    }
  }

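  // Choose the underlying SSA graph executor according to the execution
  // strategy: the default threaded executor, or the fast threaded variant.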
  if (exec_strategy.type_ == ExecutionStrategy::kDefault) {
    member_->executor_.reset(new details::ThreadedSSAGraphExecutor(
        exec_strategy, member_->local_scopes_, places, std::move(graph)));
  } else {
    member_->executor_.reset(new details::FastThreadedSSAGraphExecutor(
        exec_strategy, member_->local_scopes_, places, std::move(graph)));
  }

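  // Wrap the chosen executor so that local scopes are buffered and their
  // non-persistable variables are dropped periodically between iterations.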
  member_->executor_.reset(new details::ScopeBufferedSSAGraphExecutor(
      exec_strategy, member_->local_scopes_, std::move(var_infos),
      member_->places_, std::move(member_->executor_)));
}

void ParallelExecutor::BCastParamsToDevices(
    const std::unordered_set<std::string> &vars) const {
  // The initial broadcast: all vars are broadcast from device(0).
  for (auto &var : vars) {
    framework::Variable *main_var = member_->local_scopes_[0]->FindVar(var);
    if (main_var == nullptr || !main_var->IsType<LoDTensor>()) {
      continue;
    }

    auto &main_tensor = main_var->Get<LoDTensor>();
    if (!main_tensor.IsInitialized()) {
      VLOG(3) << "var " << var << " is not initialized; skip broadcasting.";
      continue;
    }
    auto &dims = main_tensor.dims();
    if (paddle::platform::is_gpu_place(main_tensor.place())) {
#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32)
      std::vector<void *> buffers;
      size_t numel = main_tensor.numel();
      ncclDataType_t data_type = platform::ToNCCLDataType(main_tensor.type());
      for (size_t i = 0; i < member_->places_.size(); ++i) {
        auto place = member_->places_[i];
        void *buffer;

        if (i == 0) {
          buffer = const_cast<void *>(main_tensor.data<void>());
        } else {
          auto local_scope = member_->local_scopes_[i];
          auto *t = local_scope->Var(var)->GetMutable<LoDTensor>();
          t->Resize(dims);
          buffer = t->mutable_data(place, main_tensor.type());
        }
        buffers.push_back(buffer);
      }

      PADDLE_ENFORCE_EQ(member_->places_.size(), buffers.size(),
                        "variables' buffer size to bcast NOT equal to places");
      {
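        // Issue all per-device broadcasts inside a single NCCL group, then
        // wait for every device to finish.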
        platform::NCCLGroupGuard guard;
        for (size_t i = 0; i < member_->places_.size(); ++i) {
          auto &nccl_ctx = member_->nccl_ctxs_->at(member_->places_[i]);
          platform::dynload::ncclBcast(buffers[i], numel, data_type, 0,
                                       nccl_ctx.comm_, nccl_ctx.stream());
        }
        member_->nccl_ctxs_->WaitAll();
      }
#else
      PADDLE_THROW("Not compiled with CUDA");
#endif
    } else {
      platform::CPUPlace cpu;
      for (size_t i = 0; i < member_->places_.size(); ++i) {
        if (i == 0) continue;

        auto local_scope = member_->local_scopes_[i];
        auto *t = local_scope->Var(var)->GetMutable<LoDTensor>();

        // FIXME(zcd): LR_DECAY_COUNTER should not be shared. This is a hot fix.
        if (member_->use_all_reduce_ || member_->use_cuda_ ||
            var == "@LR_DECAY_COUNTER@") {
          t->Resize(dims);
          t->mutable_data(cpu, main_tensor.type());
          paddle::framework::TensorCopy(main_tensor, cpu, t);
        } else {
          t->ShareDataWith(main_tensor);
        }
      }
    }
  }
}

void ParallelExecutor::Run(const std::vector<std::string> &fetch_tensors,
                           const std::string &fetched_var_name) {
#ifdef WITH_GPERFTOOLS
  if (gProfileStarted) {
    ProfilerFlush();
  }
#endif

  platform::RecordBlock b(0);
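  // If garbage collection is enabled, reset the per-iteration reference
  // counts before running the graph.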
  if (member_->HasGarbageCollectors()) {
    member_->ResetRuntimeReferenceCount(fetch_tensors, fetched_var_name);
  }
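  // Run the graph and expose the fetched tensors as a FeedFetchList under
  // fetched_var_name in the global scope.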
  auto fetch_data = member_->executor_->Run(fetch_tensors);
  *member_->global_scope_->Var(fetched_var_name)->GetMutable<FeedFetchList>() =
      fetch_data;
}

void ParallelExecutor::FeedTensorsIntoLocalScopes(
    const std::vector<std::unordered_map<std::string, LoDTensor>> &tensors) {
  PADDLE_ENFORCE_EQ(member_->local_scopes_.size(), tensors.size());

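  // The i-th map feeds the i-th device's local scope; target tensors share
  // the input's memory instead of copying it.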
  for (size_t i = 0; i < tensors.size(); ++i) {
    auto &map = tensors[i];
    auto *scope = member_->local_scopes_[i];
    for (auto &pair : map) {
      auto *trg = scope->Var(pair.first)->GetMutable<LoDTensor>();
      trg->ShareDataWith(pair.second);
      trg->set_lod(pair.second.lod());
    }
  }
}

void ParallelExecutor::FeedAndSplitTensorIntoLocalScopes(
    const std::unordered_map<std::string, LoDTensor> &tensors) {
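  // Split each input tensor along the batch (respecting LoD boundaries) and
  // scatter one piece into each device's local scope.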
  for (const auto &pair : tensors) {
    auto lod_tensors = pair.second.SplitLoDTensor(member_->places_);
    PADDLE_ENFORCE_EQ(
        member_->places_.size(), lod_tensors.size(),
        "The number of samples of current batch is less than the count of "
        "devices, currently, it is not allowed. (%d vs %d)",
        member_->places_.size(), lod_tensors.size());
    for (size_t j = 0; j < member_->places_.size(); ++j) {
      // TODO(panxy0718): Do I need to delete this var?
      auto t =
          member_->local_scopes_[j]->Var(pair.first)->GetMutable<LoDTensor>();
      t->ShareDataWith(lod_tensors[j]);
      t->set_lod(lod_tensors[j].lod());
    }
  }
}

ParallelExecutor::~ParallelExecutor() {
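  // Wait for all in-flight work on every device to finish before tearing
  // down scopes, executors, and communication contexts.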
  for (auto &p : member_->places_) {
    platform::DeviceContextPool::Instance().Get(p)->Wait();
  }
  delete member_;
}

}  // namespace framework
}  // namespace paddle

USE_PASS(reference_count_pass);
USE_PASS(eager_deletion_pass);