/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/framework/parallel_executor.h"
#include <string>
#include <tuple>
#include <vector>
#include "paddle/fluid/framework/ir/graph_helper.h"

#include "paddle/fluid/framework/ir/graph.h"

#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32)
#include "paddle/fluid/platform/nccl_helper.h"
#endif

#include "paddle/fluid/framework/details/fast_threaded_ssa_graph_executor.h"
#include "paddle/fluid/framework/details/multi_devices_helper.h"
#include "paddle/fluid/framework/details/parallel_ssa_graph_executor.h"
#include "paddle/fluid/framework/details/scope_buffered_ssa_graph_executor.h"
#include "paddle/fluid/framework/details/threaded_ssa_graph_executor.h"
#include "paddle/fluid/platform/profiler.h"

namespace paddle {
namespace framework {

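// Holds the executor's internal state: the device list, one local Scope per
// device, the (possibly wrapped) SSA graph executor and, on CUDA builds, the
// NCCL context map used for cross-device collectives.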
class ParallelExecutorPrivate {
 public:
  explicit ParallelExecutorPrivate(const std::vector<platform::Place> &places)
      : places_(places) {}

  ~ParallelExecutorPrivate() {
    if (own_local_scope_) {
      for (size_t i = 1; i < local_scopes_.size(); ++i) {
        // Skip the first scope, since it is the global scope.
        Scope *local_scope = local_scopes_[i];
        if (global_scope_->HasKid(local_scope)) {
          global_scope_->DeleteScope(local_scope);
        }
      }
    }
  }
  std::vector<platform::Place> places_;
  std::vector<Scope *> local_scopes_;
  Scope *global_scope_;  // not owned
  std::unique_ptr<details::SSAGraphExecutor> executor_;
  std::vector<std::unique_ptr<details::SSAGraphExecutor>> executors_;

#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32)
  std::unique_ptr<platform::NCCLContextMap> nccl_ctxs_;
#endif
  bool own_local_scope_;
  bool use_cuda_;
  bool use_all_reduce_;
};

std::vector<Scope *> &ParallelExecutor::GetLocalScopes() {
  return member_->local_scopes_;
}

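// Construction proceeds in three steps (see the numbered comments below):
// create or adopt per-device scopes and broadcast parameters, build the SSA
// graph(s) via the BuildStrategy, and wrap the chosen SSAGraphExecutor in a
// ScopeBufferedSSAGraphExecutor. A minimal call-site sketch (hypothetical
// names, for illustration only):
//
//   std::vector<platform::Place> places = {platform::CUDAPlace(0),
//                                          platform::CUDAPlace(1)};
//   ParallelExecutor pe(places, params, bcast_vars, main_program, "loss",
//                       &scope, /*local_scopes=*/{}, exec_strategy,
//                       build_strategy, /*num_trainers=*/1, /*trainer_id=*/0);
//   pe.Run({"loss"}, "fetched_vars");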
ParallelExecutor::ParallelExecutor(
    const std::vector<platform::Place> &places,
    const std::unordered_set<std::string> &params,
    const std::unordered_set<std::string> &bcast_vars,
    const ProgramDesc &main_program, const std::string &loss_var_name,
    Scope *scope, const std::vector<Scope *> &local_scopes,
    const ExecutionStrategy &exec_strategy, const BuildStrategy &build_strategy,
    size_t num_trainers, size_t trainer_id)
    : member_(new ParallelExecutorPrivate(places)) {
  member_->global_scope_ = scope;
  member_->use_cuda_ = exec_strategy.use_cuda_;
  member_->use_all_reduce_ =
      build_strategy.reduce_ == BuildStrategy::ReduceStrategy::kAllReduce;

  if (!member_->use_all_reduce_) {
    PADDLE_ENFORCE(places.size() > 1,
                   "If you set build_strategy.reduce with 'Reduce',"
                   "the number of places must be greater than 1.");
    PADDLE_ENFORCE(exec_strategy.type_ != ExecutionStrategy::kParallelGraph,
                   "You should set build_strategy.reduce with 'AllReduce' for "
                   "ParallelGraph executor type");
  }

  // Step 1. Broadcast the parameters to all devices.
  // Create local scopes
  if (local_scopes.empty()) {
    member_->own_local_scope_ = true;
    member_->local_scopes_.emplace_back(member_->global_scope_);
    for (size_t i = 1; i < member_->places_.size(); ++i) {
      member_->local_scopes_.emplace_back(&scope->NewScope());
    }
  } else {
    member_->own_local_scope_ = false;
    PADDLE_ENFORCE_EQ(member_->places_.size(), local_scopes.size());
    for (size_t i = 0; i < member_->places_.size(); ++i) {
      member_->local_scopes_.emplace_back(&local_scopes[i]->NewScope());
    }
  }

  if (member_->use_cuda_) {
// Broadcast parameters to all GPUs.
#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32)
    auto *nccl_id_var = scope->FindVar(NCCL_ID_VARNAME);
    std::unique_ptr<ncclUniqueId> nccl_id = nullptr;
    bool need_group_call = true;
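    // The NCCL unique id is obtained in one of three ways:
    //   1. NCCL_ID_VARNAME already exists in the scope: a distributed
    //      launcher has delivered it (multi-trainer case).
    //   2. ParallelGraph mode: generate an id here and publish it to the
    //      global scope so that every per-place graph joins the same ring.
    //   3. Otherwise: leave nccl_id null and let NCCLContextMap create one.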
    if (nccl_id_var != nullptr) {
      nccl_id.reset(nccl_id_var->GetMutable<ncclUniqueId>());
    } else if (exec_strategy.type_ == ExecutionStrategy::kParallelGraph) {
      nccl_id.reset(new ncclUniqueId());
      PADDLE_ENFORCE(platform::dynload::ncclGetUniqueId(nccl_id.get()));
      *member_->global_scope_->Var(NCCL_ID_VARNAME)
           ->GetMutable<ncclUniqueId>() = *nccl_id.get();
      need_group_call = false;
    } else {
      // init nccl_id in NCCLContextMap
    }

    member_->nccl_ctxs_.reset(new platform::NCCLContextMap(
        member_->places_, nccl_id.get(), num_trainers, trainer_id,
        need_group_call));
#else
    PADDLE_THROW("Not compiled with CUDA");
#endif
  }
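  // Broadcast only when this executor created extra local scopes itself;
  // caller-provided scopes are presumed to already hold the parameters.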
  if (member_->local_scopes_.size() != 1 && local_scopes.empty()) {
    BCastParamsToDevices(bcast_vars);
  }
  // The startup program has been run, so all local scopes hold correct
  // parameters.

  // Step 2. Convert main_program to an SSA graph with dependency information,
  // inserting nccl ops where needed.
  std::vector<std::unique_ptr<ir::Graph>> graphs;
#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32)
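  // In ParallelGraph mode each device gets its own single-place graph and the
  // graphs are executed concurrently; otherwise a single multi-device graph
  // is built and one executor schedules ops across all places.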
  if (exec_strategy.type_ == ExecutionStrategy::kParallelGraph) {
    for (size_t i = 0; i < member_->places_.size(); ++i) {
      std::unique_ptr<ir::Graph> graph = build_strategy.Apply(
          main_program, {member_->places_[i]}, loss_var_name, params,
          {member_->local_scopes_[i]}, member_->use_cuda_,
          member_->nccl_ctxs_.get());
      graphs.push_back(std::move(graph));
    }
  } else {
    std::unique_ptr<ir::Graph> graph = build_strategy.Apply(
        main_program, member_->places_, loss_var_name, params,
        member_->local_scopes_, member_->use_cuda_, member_->nccl_ctxs_.get());
    graphs.push_back(std::move(graph));
  }

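  // Eager deletion: if a memory threshold is configured, attach a
  // reference-counting pass and a per-GPU garbage collector so that tensors
  // can be freed as soon as their last reader has finished.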
  auto max_memory_size = GetEagerDeletionThreshold();
  // FIXME(Yancey1989): eager deletion does not yet work in parallel graph
  // mode, so skip it there for now.
  if (max_memory_size >= 0 &&
      exec_strategy.type_ != ExecutionStrategy::kParallelGraph) {
    for (auto &place : member_->places_) {
      if (!platform::is_gpu_place(place)) continue;
      auto gpu_place = boost::get<platform::CUDAPlace>(place);
      if (gcs_[gpu_place.device] == nullptr) {
        ref_cnts_[gpu_place.device].reset(new details::ReferenceCountMap());
        cur_ref_cnts_[gpu_place.device].reset(
            new details::AtomicReferenceCountMap());
        gcs_[gpu_place.device].reset(
            new StreamGarbageCollector<Tensor>(gpu_place, max_memory_size));
      }
    }
    if (!gcs_.empty()) {
      for (size_t i = 0; i < graphs.size(); ++i) {
        auto ref_cnt_pass =
            ir::PassRegistry::Instance().Get("reference_count_pass");
        ref_cnt_pass->SetNotOwned(details::kGlobalReferenceCount, &ref_cnts_);
        ref_cnt_pass->SetNotOwned(details::kCurReferenceCount, &cur_ref_cnts_);
        ref_cnt_pass->SetNotOwned(details::kGarbageCollector, &gcs_);
        graphs[i] = ref_cnt_pass->Apply(std::move(graphs[i]));
        graphs[i]->SetNotOwned("garbage_collector", &gcs_);
      }
    }
  }
#else
  std::unique_ptr<ir::Graph> graph =
      build_strategy.Apply(main_program, member_->places_, loss_var_name,
                           params, member_->local_scopes_, member_->use_cuda_);
  graphs.push_back(std::move(graph));
#endif

  // Step 3. Create vars in each scope. Passes may also create new vars.
  //         Skip control vars and empty vars.
  std::vector<std::vector<details::VariableInfo>> var_infos_list;
  for (size_t i = 0; i < graphs.size(); ++i) {
    std::vector<details::VariableInfo> var_infos;
    for (auto &node : graphs[i]->Nodes()) {
      if (node->IsVar() && !node->IsCtrlVar() && node->Var()) {
        var_infos.emplace_back();
        var_infos.back().name_ = node->Var()->Name();
        var_infos.back().type_ = node->Var()->GetType();
        var_infos.back().persistable_ = node->Var()->Persistable();
      }
    }
    var_infos_list.emplace_back(std::move(var_infos));
  }

  // If loss_var_name is given, the number of graphs should be exactly one.
  if (!loss_var_name.empty()) {
    size_t graph_num = ir::GraphNum(*graphs[0]);
    if (graph_num > 1) {
      LOG(WARNING)
          << "The number of graph should be only one, "
             "but the current graph has "
          << graph_num
          << " sub_graphs. If you want to see the nodes of the "
             "sub_graphs, you should use 'FLAGS_print_sub_graph_dir' "
             "to specify the output dir. NOTES: if you not do training, "
             "please don't pass loss_var_name.";
    }
  }

  if (exec_strategy.type_ == ExecutionStrategy::kDefault) {
    member_->executor_.reset(new details::ThreadedSSAGraphExecutor(
        exec_strategy, member_->local_scopes_, places, std::move(graphs[0])));
  } else if (exec_strategy.type_ == ExecutionStrategy::kParallelGraph) {
    member_->executor_.reset(new details::ParallelSSAGraphExecutor(
        exec_strategy, member_->local_scopes_, places, std::move(graphs)));
  } else {
    member_->executor_.reset(new details::FastThreadedSSAGraphExecutor(
        exec_strategy, member_->local_scopes_, places, std::move(graphs[0])));
  }

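  // Whichever executor was chosen above, wrap it in a
  // ScopeBufferedSSAGraphExecutor so that local scope variables (described by
  // var_infos_list) are prepared before and cleaned up after each run.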
  member_->executor_.reset(new details::ScopeBufferedSSAGraphExecutor(
      exec_strategy, member_->local_scopes_, std::move(var_infos_list),
      member_->places_, std::move(member_->executor_)));
}

void ParallelExecutor::BCastParamsToDevices(
    const std::unordered_set<std::string> &vars) const {
  // The initializing broadcast: all vars are broadcast from device(0).
  for (auto &var : vars) {
    framework::Variable *main_var = member_->local_scopes_[0]->FindVar(var);
    if (main_var == nullptr || !main_var->IsType<LoDTensor>()) {
      continue;
    }

    auto &main_tensor = main_var->Get<LoDTensor>();
    if (!main_tensor.IsInitialized()) {
      VLOG(3) << "var " << var << " is not initialized, skip broadcast!";
      continue;
    }
    auto &dims = main_tensor.dims();
    if (paddle::platform::is_gpu_place(main_tensor.place())) {
#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32)
      std::vector<void *> buffers;
      size_t numel = main_tensor.numel();
      ncclDataType_t data_type = platform::ToNCCLDataType(main_tensor.type());
      for (size_t i = 0; i < member_->places_.size(); ++i) {
        auto place = member_->places_[i];
        void *buffer;

        if (i == 0) {
          buffer = const_cast<void *>(main_tensor.data<void>());
        } else {
          auto local_scope = member_->local_scopes_[i];
          auto *t = local_scope->Var(var)->GetMutable<LoDTensor>();
          t->Resize(dims);
          buffer = t->mutable_data(place, main_tensor.type());
        }
        buffers.push_back(buffer);
      }

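      // Broadcast from device 0 into every collected buffer in one NCCL
      // group: NCCLGroupGuard fuses the per-place ncclBcast calls, and
      // WaitAll() blocks until every participating stream has finished.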
      PADDLE_ENFORCE_EQ(member_->places_.size(), buffers.size(),
                        "variables' buffer size to bcast NOT equal to places");
      {
        platform::NCCLGroupGuard guard;
        for (size_t i = 0; i < member_->places_.size(); ++i) {
          auto &nccl_ctx = member_->nccl_ctxs_->at(member_->places_[i]);
          platform::dynload::ncclBcast(buffers[i], numel, data_type, 0,
                                       nccl_ctx.comm_, nccl_ctx.stream());
        }
        member_->nccl_ctxs_->WaitAll();
      }
#else
      PADDLE_THROW("Not compiled with CUDA");
#endif
    } else {
      platform::CPUPlace cpu;
      for (size_t i = 0; i < member_->places_.size(); ++i) {
        if (i == 0) continue;

        auto local_scope = member_->local_scopes_[i];
        auto *t = local_scope->Var(var)->GetMutable<LoDTensor>();

        // FIXME(zcd): LR_DECAY_COUNTER should not be shared. This is a hot fix.
        if (member_->use_all_reduce_ || member_->use_cuda_ ||
            var == "@LR_DECAY_COUNTER@") {
          t->Resize(dims);
          t->mutable_data(cpu, main_tensor.type());
          paddle::framework::TensorCopy(main_tensor, cpu, t);
        } else {
          t->ShareDataWith(main_tensor);
        }
      }
    }
  }
}

void ParallelExecutor::Run(const std::vector<std::string> &fetch_tensors,
                           const std::string &fetched_var_name) {
  platform::RecordBlock b(0);
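  // On CUDA builds, reset the per-step reference counts and exclude the
  // fetched variables so that eager deletion cannot free tensors the caller
  // still needs after this run.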
#ifdef PADDLE_WITH_CUDA
  if (!gcs_.empty()) {
    ResetReferenceCount();
    for (auto &pair : cur_ref_cnts_) {
      auto &name_map = *(pair.second);
      for (auto &fetch_name : fetch_tensors) {
        name_map.erase(fetch_name);
      }
      name_map.erase(fetched_var_name);
    }
  }
#endif
  auto fetch_data = member_->executor_->Run(fetch_tensors);
  *member_->global_scope_->Var(fetched_var_name)->GetMutable<FeedFetchList>() =
      fetch_data;
}

void ParallelExecutor::FeedTensorsIntoLocalScopes(
    const std::vector<std::unordered_map<std::string, LoDTensor>> &tensors) {
  PADDLE_ENFORCE_EQ(member_->local_scopes_.size(), tensors.size());

  for (size_t i = 0; i < tensors.size(); ++i) {
    auto &map = tensors[i];
    auto *scope = member_->local_scopes_[i];
    for (auto &pair : map) {
      auto *trg = scope->Var(pair.first)->GetMutable<LoDTensor>();
      trg->ShareDataWith(pair.second);
      trg->set_lod(pair.second.lod());
    }
  }
}

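// Unlike FeedTensorsIntoLocalScopes, which takes one pre-split tensor map per
// device, this entry point takes a single batch and splits it across
// member_->places_ along the batch dimension via SplitLoDTensor.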
void ParallelExecutor::FeedAndSplitTensorIntoLocalScopes(
    const std::unordered_map<std::string, LoDTensor> &tensors) {
  for (auto pair : tensors) {
    auto lod_tensors = pair.second.SplitLoDTensor(member_->places_);
    PADDLE_ENFORCE_EQ(
        member_->places_.size(), lod_tensors.size(),
        "The number of samples of current batch is less than the count of "
        "devices, currently, it is not allowed. (%d vs %d)",
        member_->places_.size(), lod_tensors.size());
    for (size_t j = 0; j < member_->places_.size(); ++j) {
      // TODO(panxy0718): Do I need to delete this var?
      auto t =
          member_->local_scopes_[j]->Var(pair.first)->GetMutable<LoDTensor>();
      t->ShareDataWith(lod_tensors[j]);
      t->set_lod(lod_tensors[j].lod());
    }
  }
}

ParallelExecutor::~ParallelExecutor() {
  for (auto &p : member_->places_) {
    platform::DeviceContextPool::Instance().Get(p)->Wait();
  }
  // member_ must be destructed before gcs_ since the destructor of
  // ReferenceCountOpHandle uses raw pointers of gcs_ inside.
  member_.reset();
}

}  // namespace framework
}  // namespace paddle
#ifdef PADDLE_WITH_CUDA
USE_PASS(reference_count_pass);
#endif