/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/framework/parallel_executor.h"

#include <string>
#include <tuple>
#include <vector>

#ifdef PADDLE_WITH_CUDA
#include "paddle/fluid/platform/nccl_helper.h"
#endif

#include "paddle/fluid/framework/details/graph_builder_factory.h"
#include "paddle/fluid/framework/details/scope_buffered_ssa_graph_executor.h"
#include "paddle/fluid/framework/details/threaded_ssa_graph_executor.h"
#include "paddle/fluid/platform/profiler.h"

namespace paddle {
namespace framework {

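// Holds the per-device state behind ParallelExecutor: the device list, one
// local scope per device, the wrapped SSA graph executor and, when built
// with CUDA, the NCCL communication contexts.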
class ParallelExecutorPrivate {
 public:
  explicit ParallelExecutorPrivate(const std::vector<platform::Place> &places)
      : places_(places) {}

  std::vector<platform::Place> places_;
  std::vector<Scope *> local_scopes_;
  Scope *global_scope_;
  std::unique_ptr<details::SSAGraphExecutor> executor_;

#ifdef PADDLE_WITH_CUDA
  std::unique_ptr<platform::NCCLContextMap> nccl_ctxs_;
#endif
  bool own_local_scope_;
  bool use_cuda_;
};

std::vector<Scope *> &ParallelExecutor::GetLocalScopes() {
  return member_->local_scopes_;
}

ParallelExecutor::ParallelExecutor(
    const std::vector<platform::Place> &places,
    const std::unordered_set<std::string> &params,
    const std::unordered_set<std::string> &bcast_vars,
    const ProgramDesc &main_program, const std::string &loss_var_name,
    Scope *scope, const std::vector<Scope *> &local_scopes,
    const ExecutionStrategy &exec_strategy, const BuildStrategy &build_strategy,
    size_t num_trainers, size_t trainer_id)
    : member_(new ParallelExecutorPrivate(places)) {
  member_->global_scope_ = scope;
  member_->use_cuda_ = exec_strategy.use_cuda_;

  // Step 1. Broadcast the parameters to all devices.
  // Create local scopes
  if (local_scopes.empty()) {
    member_->own_local_scope_ = true;
    member_->local_scopes_.emplace_back(member_->global_scope_);
    for (size_t i = 1; i < member_->places_.size(); ++i) {
      member_->local_scopes_.emplace_back(&scope->NewScope());
    }
  } else {
    member_->own_local_scope_ = false;
    PADDLE_ENFORCE_EQ(member_->places_.size(), local_scopes.size());
    for (size_t i = 0; i < member_->places_.size(); ++i) {
      member_->local_scopes_.emplace_back(&local_scopes[i]->NewScope());
    }
  }

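  // When running on CUDA devices, set up one NCCL context per place. If an
  // NCCL unique id variable is present in the scope (multi-trainer mode),
  // the contexts join that communication ring using num_trainers and
  // trainer_id.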
  if (member_->use_cuda_) {
// Bcast Parameters to all GPUs
#ifdef PADDLE_WITH_CUDA
    auto *nccl_id_var = scope->FindVar(NCCL_ID_VARNAME);
    ncclUniqueId *nccl_id = nullptr;
    if (nccl_id_var != nullptr) {
      nccl_id = nccl_id_var->GetMutable<ncclUniqueId>();
    }
    member_->nccl_ctxs_.reset(new platform::NCCLContextMap(
        member_->places_, nccl_id, num_trainers, trainer_id));
#else
    PADDLE_THROW("Not compiled with CUDA");
#endif
  }

  if (member_->local_scopes_.size() != 1 && local_scopes.empty()) {
    BCastParamsToGPUs(bcast_vars);
  }
  // The startup program has been run. All local scopes have correct
  // parameters.

  // Step 2. Create vars in each scope;
  std::vector<details::VariableInfo> var_infos;
  for (auto *var : main_program.Block(0).AllVars()) {
    var_infos.emplace_back();
    var_infos.back().name_ = var->Name();
    var_infos.back().type_ = var->GetType();
    var_infos.back().persistable_ = var->Persistable();
  }

  // Step 3. Convert main_program to SSA form and a dependency graph. Also,
  // insert nccl ops where needed.

  details::SSAGraphBuilderFactory builder_factory(
      member_->places_, loss_var_name, params, member_->local_scopes_,
      build_strategy);
  if (member_->use_cuda_) {
#ifdef PADDLE_WITH_CUDA
    builder_factory.SetNCCLContextMap(member_->nccl_ctxs_.get());
#else
    PADDLE_THROW("Not compiled with CUDA");
#endif
  }

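  // Step 4. Hand the built SSA graph to a threaded executor that schedules
  // ready ops across all devices.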
  member_->executor_.reset(new details::ThreadedSSAGraphExecutor(
      exec_strategy, member_->local_scopes_, places,
      builder_factory.Create()->Build(main_program)));

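  // Wrap the executor so the per-run local variables described by var_infos
  // are created in each local scope before execution and cleaned up
  // afterwards.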
  member_->executor_.reset(new details::ScopeBufferedSSAGraphExecutor(
      exec_strategy, member_->local_scopes_, std::move(var_infos),
      member_->places_, std::move(member_->executor_)));
}

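// Broadcasts the parameters listed in `vars` from the root local scope to
// all other device scopes: with ncclBcast on GPU, with TensorCopy on CPU.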
void ParallelExecutor::BCastParamsToGPUs(
    const std::unordered_set<std::string> &vars) const {
  auto *main_scope = member_->local_scopes_[0];

  for (auto &var : vars) {
    auto *main_var = main_scope->FindVar(var);
    if (main_var == nullptr || !main_var->IsType<LoDTensor>()) {
      continue;
    }

    auto &main_tensor = main_var->Get<LoDTensor>();
    auto &dims = main_tensor.dims();
    if (paddle::platform::is_gpu_place(main_tensor.place())) {
#ifdef PADDLE_WITH_CUDA
      size_t numel = main_tensor.numel();
      ncclDataType_t data_type = platform::ToNCCLDataType(main_tensor.type());
      platform::NCCLGroupGuard guard;
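      // Device 0 supplies the source buffer; every other device first
      // allocates a destination tensor of the same shape in its local scope.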
      for (size_t i = 0; i < member_->places_.size(); ++i) {
        auto place = member_->places_[i];
        void *buffer;
        if (i == 0) {
          buffer = const_cast<void *>(main_tensor.data<void>());
        } else {
          auto local_scope = member_->local_scopes_[i];
          auto *t = local_scope->Var(var)->GetMutable<LoDTensor>();
          t->Resize(dims);
          buffer = t->mutable_data(place, main_tensor.type());
        }
        auto &nccl_ctx = member_->nccl_ctxs_->at(place);
        platform::dynload::ncclBcast(buffer, numel, data_type, 0,
                                     nccl_ctx.comm_, nccl_ctx.stream());
      }
      member_->nccl_ctxs_->WaitAll();
#else
      PADDLE_THROW("Not compiled with CUDA");
#endif
    } else {
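      // CPU path: replicate the tensor into the other local scopes with a
      // plain tensor copy.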
      platform::CPUPlace cpu;
      for (size_t i = 1; i < member_->places_.size(); ++i) {
        auto local_scope = member_->local_scopes_[i];
        auto *t = local_scope->Var(var)->GetMutable<LoDTensor>();
        t->Resize(dims);
        t->mutable_data(cpu, main_tensor.type());
        paddle::framework::TensorCopy(main_tensor, cpu, t);
      }
    }
  }
}

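// Run one iteration: execute the SSA graph on all devices, then write the
// aggregated fetch results into `fetched_var_name` in the global scope.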
void ParallelExecutor::Run(const std::vector<std::string> &fetch_tensors,
                           const std::string &fetched_var_name) {
  platform::RecordBlock b(0);
  auto fetch_data = member_->executor_->Run(fetch_tensors);
  *member_->global_scope_->Var(fetched_var_name)->GetMutable<FeedFetchList>() =
      fetch_data;
}

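// Feed pre-split input: expects exactly one name->tensor map per device and
// shares each tensor into the matching local scope.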
void ParallelExecutor::FeedTensorsIntoLocalScopes(
    const std::vector<std::unordered_map<std::string, LoDTensor>> &tensors) {
  PADDLE_ENFORCE_EQ(member_->local_scopes_.size(), tensors.size());

  for (size_t i = 0; i < tensors.size(); ++i) {
    auto &map = tensors[i];
    auto *scope = member_->local_scopes_[i];
    for (auto &pair : map) {
      auto *trg = scope->Var(pair.first)->GetMutable<LoDTensor>();
      trg->ShareDataWith(pair.second);
      trg->set_lod(pair.second.lod());
    }
  }
}

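// Feed whole-batch input: split every tensor along its batch (LoD) boundary
// and distribute one slice to each device's local scope.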
void ParallelExecutor::FeedAndSplitTensorIntoLocalScopes(
    const std::unordered_map<std::string, LoDTensor> &tensors) {
  for (const auto &pair : tensors) {
    auto lod_tensors = pair.second.SplitLoDTensor(member_->places_);
    PADDLE_ENFORCE_EQ(
        member_->places_.size(), lod_tensors.size(),
        "The number of samples in the current batch is smaller than the "
        "number of devices; this is currently not allowed. (%d vs %d)",
        member_->places_.size(), lod_tensors.size());
    for (size_t j = 0; j < member_->places_.size(); ++j) {
      // TODO(panxy0718): Do I need to delete this var?
      auto t =
          member_->local_scopes_[j]->Var(pair.first)->GetMutable<LoDTensor>();
      t->ShareDataWith(lod_tensors[j]);
      t->set_lod(lod_tensors[j].lod());
    }
  }
}

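// Only scopes created by this executor are deleted; local_scopes_[0] is the
// global scope itself, so deletion starts at index 1.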
ParallelExecutor::~ParallelExecutor() {
  if (member_->own_local_scope_) {
    for (size_t i = 1; i < member_->local_scopes_.size(); ++i) {
      member_->global_scope_->DeleteScope(member_->local_scopes_[i]);
    }
  }
}

}  // namespace framework
}  // namespace paddle