/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/framework/parallel_executor.h"

#include <string>
#include <tuple>
#include <vector>

#ifdef PADDLE_WITH_CUDA
#include "paddle/fluid/platform/nccl_helper.h"
#endif

#include "paddle/fluid/framework/details/graph_builder_factory.h"
#include "paddle/fluid/framework/details/scope_buffered_ssa_graph_executor.h"
#include "paddle/fluid/framework/details/threaded_ssa_graph_executor.h"
#include "paddle/fluid/platform/profiler.h"

namespace paddle {
namespace framework {

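// ParallelExecutorPrivate holds the per-device state behind the executor's
// pImpl pointer: the places to run on, one local scope per place, the
// underlying SSA graph executor and, for CUDA builds, the NCCL context map.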
class ParallelExecutorPrivate {
 public:
  explicit ParallelExecutorPrivate(const std::vector<platform::Place> &places)
      : places_(places) {}

  std::vector<platform::Place> places_;
  std::vector<Scope *> local_scopes_;
  Scope *global_scope_;
  std::unique_ptr<details::SSAGraphExecutor> executor_;

#ifdef PADDLE_WITH_CUDA
  std::unique_ptr<platform::NCCLContextMap> nccl_ctxs_;
#endif
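  // Whether the local scopes were created by this executor itself (and must
  // therefore be cleaned up in its destructor).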
  bool own_local_scope;
};

std::vector<Scope *> &ParallelExecutor::GetLocalScopes() {
  return member_->local_scopes_;
}

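// A minimal construction/Run sketch (illustrative only; `places`, `params`,
// `bcast_vars`, `main_program`, the strategies and the fetch names are
// placeholders supplied by the caller, and the startup program is assumed to
// have already been run on `scope`):
//
//   Scope scope;
//   // ... run the startup program on `scope` to initialize parameters ...
//   ParallelExecutor pe(places, params, bcast_vars, main_program,
//                       loss_var_name, &scope, /*local_scopes=*/{},
//                       exec_strategy, build_strategy,
//                       /*num_trainers=*/1, /*trainer_id=*/0);
//   pe.Run(fetch_tensors, fetched_var_name);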
ParallelExecutor::ParallelExecutor(
    const std::vector<platform::Place> &places,
    const std::unordered_set<std::string> &params,
    const std::unordered_set<std::string> &bcast_vars,
    const ProgramDesc &main_program, const std::string &loss_var_name,
    Scope *scope, const std::vector<Scope *> &local_scopes,
    const ExecutionStrategy &exec_strategy, const BuildStrategy &build_strategy,
    size_t num_trainers, size_t trainer_id)
    : member_(new ParallelExecutorPrivate(places)) {
  member_->global_scope_ = scope;

  // Step 1. Create the local scopes (one per place); the parameters are
  // broadcast to them afterwards.
  if (local_scopes.empty()) {
    member_->own_local_scope = true;
    member_->local_scopes_.emplace_back(member_->global_scope_);
    for (size_t i = 1; i < member_->places_.size(); ++i) {
      member_->local_scopes_.emplace_back(&scope->NewScope());
    }
  } else {
    member_->own_local_scope = false;
    PADDLE_ENFORCE_EQ(member_->places_.size(), local_scopes.size());
    for (size_t i = 0; i < member_->places_.size(); ++i) {
      member_->local_scopes_.emplace_back(&local_scopes[i]->NewScope());
    }
  }

// Bcast Parameters to all GPUs
#ifdef PADDLE_WITH_CUDA
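  // If an NCCL unique id was placed into the scope beforehand (multi-trainer
  // distributed runs), reuse it so that every trainer joins the same
  // communicator; otherwise a communicator is set up for the local places only.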
  auto *nccl_id_var = scope->FindVar(NCCL_ID_VARNAME);
  ncclUniqueId *nccl_id = nullptr;
  if (nccl_id_var != nullptr) {
    nccl_id = nccl_id_var->GetMutable<ncclUniqueId>();
  }
  member_->nccl_ctxs_.reset(new platform::NCCLContextMap(
      member_->places_, nccl_id, num_trainers, trainer_id));
#endif
  if (platform::is_gpu_place(places[0]) && member_->local_scopes_.size() != 1 &&
      local_scopes.empty()) {  // Is CUDA
    BCastParamsToGPUs(bcast_vars);
  }
  // The startup program has been run; all local scopes now hold correct parameters.

  // Step 2. Create vars in each scope;
  std::vector<details::VariableInfo> var_infos;
  for (auto *var : main_program.Block(0).AllVars()) {
    var_infos.emplace_back();
    var_infos.back().name_ = var->Name();
    var_infos.back().type_ = var->GetType();
    var_infos.back().persistable_ = var->Persistable();
  }

  // Step 3. Convert main_program to SSA form and dependency graph. Also, insert
  // ncclOp
  details::SSAGraphBuilderFactory builder_factory(
      member_->places_, loss_var_name, params, member_->local_scopes_,
      build_strategy);

#ifdef PADDLE_WITH_CUDA
  builder_factory.SetNCCLContextMap(member_->nccl_ctxs_.get());
#endif
  // Take ownership of the SSA graph builder produced by the factory.
  builder_ = builder_factory.Create();
  if (builder_ == nullptr) {
    VLOG(3) << "builder is null.";
  }

  member_->executor_.reset(new details::ThreadedSSAGraphExecutor(
      exec_strategy, member_->local_scopes_, places,
      builder_->Build(main_program)));

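  // Wrap the threaded executor so that the per-iteration local scopes are
  // prepared and cleaned up around each run, using the variable info
  // collected in Step 2.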
  member_->executor_.reset(new details::ScopeBufferedSSAGraphExecutor(
      exec_strategy, member_->local_scopes_, std::move(var_infos),
      member_->places_, std::move(member_->executor_)));
}

void ParallelExecutor::BCastParamsToGPUs(
    const std::unordered_set<std::string> &vars) const {
#ifdef PADDLE_WITH_CUDA
  auto *main_scope = member_->local_scopes_[0];

  for (auto &var : vars) {
    auto *main_var = main_scope->FindVar(var);
    if (main_var == nullptr || !main_var->IsType<LoDTensor>()) {
      continue;
    }

    auto &main_tensor = main_var->Get<LoDTensor>();
    auto &dims = main_tensor.dims();
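    // On GPU, device 0 already holds the initialized parameter; every other
    // device allocates a same-shaped buffer and receives the data via
    // ncclBcast issued inside a single NCCL group.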
    if (paddle::platform::is_gpu_place(main_tensor.place())) {
      size_t numel = main_tensor.numel();
      ncclDataType_t data_type = platform::ToNCCLDataType(main_tensor.type());
      platform::NCCLGroupGuard guard;
      for (size_t i = 0; i < member_->places_.size(); ++i) {
        auto place = member_->places_[i];
        void *buffer;
        if (i == 0) {
          buffer = const_cast<void *>(main_tensor.data<void>());
        } else {
          auto local_scope = member_->local_scopes_[i];
          auto *t = local_scope->Var(var)->GetMutable<LoDTensor>();
          t->Resize(dims);
          buffer = t->mutable_data(place, main_tensor.type());
        }
        auto &nccl_ctx = member_->nccl_ctxs_->at(place);

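        // Broadcast from the device the builder assigned to this variable,
        // if any; otherwise device 0 (where the parameter lives) is the root.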
        if (builder_.get() != nullptr &&
            builder_->GetRemoteVarDeviceId(var) != -1) {
          int place_id = builder_->GetRemoteVarDeviceId(var);
          platform::dynload::ncclBcast(buffer, numel, data_type, place_id,
                                       nccl_ctx.comm_, nccl_ctx.stream());
        } else {
          platform::dynload::ncclBcast(buffer, numel, data_type, 0,
                                       nccl_ctx.comm_, nccl_ctx.stream());
        }
      }
    } else {
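      // On CPU, simply copy the parameter from the main scope's tensor into
      // every other local scope.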
      platform::CPUPlace cpu;
      for (size_t i = 1; i < member_->places_.size(); ++i) {
        auto local_scope = member_->local_scopes_[i];
        auto *t = local_scope->Var(var)->GetMutable<LoDTensor>();
        t->Resize(dims);
        t->mutable_data(cpu, main_tensor.type());
        paddle::framework::TensorCopy(main_tensor, cpu, t);
      }
    }
    member_->nccl_ctxs_->WaitAll();
  }
#else
  PADDLE_THROW("Not compiled with CUDA");
#endif
}

void ParallelExecutor::Run(const std::vector<std::string> &fetch_tensors,
                           const std::string &fetched_var_name) {
  platform::RecordBlock b(0);
  auto fetch_data = member_->executor_->Run(fetch_tensors);
  *member_->global_scope_->Var(fetched_var_name)->GetMutable<FeedFetchList>() =
      fetch_data;
}

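// FeedTensorsIntoLocalScopes expects one feed map per device (the caller has
// already split the data), while FeedAndSplitTensorIntoLocalScopes below takes
// a single feed map and splits each LoDTensor across the places.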
void ParallelExecutor::FeedTensorsIntoLocalScopes(
    const std::vector<std::unordered_map<std::string, LoDTensor>> &tensors) {
  PADDLE_ENFORCE_EQ(member_->local_scopes_.size(), tensors.size());

  for (size_t i = 0; i < tensors.size(); ++i) {
    auto &map = tensors[i];
    auto *scope = member_->local_scopes_[i];
    for (auto &pair : map) {
      auto *trg = scope->Var(pair.first)->GetMutable<LoDTensor>();
      trg->ShareDataWith(pair.second);
      trg->set_lod(pair.second.lod());
    }
  }
}

void ParallelExecutor::FeedAndSplitTensorIntoLocalScopes(
    const std::unordered_map<std::string, LoDTensor> &tensors) {
  for (auto pair : tensors) {
    auto lod_tensors = pair.second.SplitLoDTensor(member_->places_);
    PADDLE_ENFORCE_EQ(
        member_->places_.size(), lod_tensors.size(),
        "The number of samples in the current batch is less than the number "
        "of devices, which is currently not allowed. (%d vs %d)",
        member_->places_.size(), lod_tensors.size());
    for (size_t j = 0; j < member_->places_.size(); ++j) {
      // TODO(panxy0718): Do I need to delete this var?
      auto t =
          member_->local_scopes_[j]->Var(pair.first)->GetMutable<LoDTensor>();
      t->ShareDataWith(lod_tensors[j]);
      t->set_lod(lod_tensors[j].lod());
    }
  }
}

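// When the executor created the local scopes itself, delete them on teardown;
// index 0 aliases the global scope and is skipped.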
ParallelExecutor::~ParallelExecutor() {
  if (member_->own_local_scope) {
    for (size_t i = 1; i < member_->local_scopes_.size(); ++i) {
      member_->global_scope_->DeleteScope(member_->local_scopes_[i]);
    }
  }
}

}  // namespace framework
}  // namespace paddle