/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/framework/parallel_executor.h"

#include <string>
#include <tuple>
#include <vector>

#ifdef PADDLE_WITH_CUDA
#include "paddle/fluid/platform/nccl_helper.h"
#endif

#include "paddle/fluid/framework/details/multi_devices_graph_builder.h"
#include "paddle/fluid/framework/details/threaded_ssa_graph_executor.h"
#include "paddle/fluid/platform/profiler.h"

namespace paddle {
namespace framework {

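// Internal state of a ParallelExecutor (pimpl): the target places, one local
// scope per place, the SSA graph executor that runs the program and, when
// built with CUDA, the per-place NCCL communication contexts.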
class ParallelExecutorPrivate {
 public:
  explicit ParallelExecutorPrivate(const std::vector<platform::Place> &places)
      : places_(places) {}

  std::vector<platform::Place> places_;
  std::vector<Scope *> local_scopes_;
  Scope *global_scope_;
  std::unique_ptr<details::SSAGraphExecutor> executor_;

#ifdef PADDLE_WITH_CUDA
  std::unique_ptr<platform::NCCLContextMap> nccl_ctxs_;
#endif

  std::vector<std::tuple<std::string, proto::VarType::Type, bool>> var_types_;
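  // True when this executor created the local scopes itself; they are then
  // deleted in the destructor.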
  bool own_local_scope;
};

std::vector<Scope *> &ParallelExecutor::GetLocalScopes() {
  return member_->local_scopes_;
}

ParallelExecutor::ParallelExecutor(
    const std::vector<platform::Place> &places,
    const std::unordered_set<std::string> &params,
    const std::unordered_set<std::string> &bcast_vars,
    const ProgramDesc &main_program, const std::string &loss_var_name,
    Scope *scope, const std::vector<Scope *> &local_scopes,
    const ExecutionStrategy &exec_strategy, const BuildStrategy &build_strategy,
    size_t num_trainers, size_t trainer_id)
    : member_(new ParallelExecutorPrivate(places)) {
  member_->global_scope_ = scope;

  // Step 1. Broadcast the parameters to all devices.
  // Create the local scopes first.
  if (local_scopes.empty()) {
    member_->own_local_scope = true;
    member_->local_scopes_.emplace_back(member_->global_scope_);
    for (size_t i = 1; i < member_->places_.size(); ++i) {
      member_->local_scopes_.emplace_back(&scope->NewScope());
    }
  } else {
    member_->own_local_scope = false;
    PADDLE_ENFORCE_EQ(member_->places_.size(), local_scopes.size());
    for (size_t i = 0; i < member_->places_.size(); ++i) {
      member_->local_scopes_.emplace_back(&local_scopes[i]->NewScope());
    }
  }

// Bcast Parameters to all GPUs
#ifdef PADDLE_WITH_CUDA
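  // Reuse a pre-created NCCL unique id from the scope (multi-trainer runs);
  // when it is absent, NCCLContextMap receives nullptr and sets up the
  // communicators for the local places itself.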
  auto *nccl_id_var = scope->FindVar(NCCL_ID_VARNAME);
  ncclUniqueId *nccl_id = nullptr;
  if (nccl_id_var != nullptr) {
    nccl_id = nccl_id_var->GetMutable<ncclUniqueId>();
  }
  member_->nccl_ctxs_.reset(new platform::NCCLContextMap(
      member_->places_, nccl_id, num_trainers, trainer_id));
#endif
  if (platform::is_gpu_place(places[0]) && member_->local_scopes_.size() != 1 &&
      local_scopes.empty()) {  // Is CUDA
    BCastParamsToGPUs(bcast_vars);
  }
// The startup program has been run, so all local scopes hold correct
// parameters.

// Step 2. Convert main_program to SSA form and a dependency graph. Also,
// insert NCCL ops where needed.
#ifdef PADDLE_WITH_CUDA
  details::MultiDevSSAGraphBuilder builder(
      member_->places_, loss_var_name, params, member_->local_scopes_,
      member_->nccl_ctxs_.get(), build_strategy);
#else
  details::MultiDevSSAGraphBuilder builder(member_->places_, loss_var_name,
                                           params, member_->local_scopes_,
                                           build_strategy);
#endif
  auto graph = builder.Build(main_program);

  member_->executor_.reset(new details::ThreadedSSAGraphExecutor(
      exec_strategy, member_->local_scopes_, places, std::move(graph)));

  // Step 3. Record (name, type, persistable) for every variable of block 0;
  // Run() uses this to create the variables in each scope.
  for (auto *var : main_program.Block(0).AllVars()) {
    member_->var_types_.emplace_back(var->Name(), var->GetType(),
                                     var->Persistable());
  }
}

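// Broadcast the given parameters from the first local scope to all other
// devices: GPU tensors go through ncclBcast, CPU tensors are copied with
// TensorCopy. Only available when compiled with CUDA.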
void ParallelExecutor::BCastParamsToGPUs(
    const std::unordered_set<std::string> &vars) const {
#ifdef PADDLE_WITH_CUDA
  auto *main_scope = member_->local_scopes_[0];

  for (auto &var : vars) {
    auto *main_var = main_scope->FindVar(var);
    if (main_var == nullptr || !main_var->IsType<LoDTensor>()) {
      continue;
    }

    auto &main_tensor = main_var->Get<LoDTensor>();
    auto &dims = main_tensor.dims();
    if (paddle::platform::is_gpu_place(main_tensor.place())) {
      size_t numel = main_tensor.numel();
      ncclDataType_t data_type = platform::ToNCCLDataType(main_tensor.type());
      platform::NCCLGroupGuard guard;
      for (size_t i = 0; i < member_->places_.size(); ++i) {
        auto place = member_->places_[i];
        void *buffer;
        if (i == 0) {
          buffer = const_cast<void *>(main_tensor.data<void>());
        } else {
          auto local_scope = member_->local_scopes_[i];
          auto *t = local_scope->Var(var)->GetMutable<LoDTensor>();
          t->Resize(dims);
          buffer = t->mutable_data(place, main_tensor.type());
        }
        auto &nccl_ctx = member_->nccl_ctxs_->at(place);
        platform::dynload::ncclBcast(buffer, numel, data_type, 0,
                                     nccl_ctx.comm_, nccl_ctx.stream());
      }
    } else {
      platform::CPUPlace cpu;
      for (size_t i = 1; i < member_->places_.size(); ++i) {
        auto local_scope = member_->local_scopes_[i];
        auto *t = local_scope->Var(var)->GetMutable<LoDTensor>();
        t->Resize(dims);
        t->mutable_data(cpu, main_tensor.type());
        paddle::framework::TensorCopy(main_tensor, cpu, t);
      }
    }
    member_->nccl_ctxs_->WaitAll();
  }
#else
  PADDLE_THROW("Not compiled with CUDA");
#endif
}

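// Run one iteration: create a fresh execution scope per device, run the SSA
// graph, store the fetched tensors in `fetched_var_name` of the global scope,
// then wait for all device streams and delete the per-iteration scopes.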
void ParallelExecutor::Run(const std::vector<std::string> &fetch_tensors,
                           const std::string &fetched_var_name) {
  platform::RecordBlock b(0);
  // Create a per-iteration execution scope inside each local scope and make
  // sure every program variable exists: persistable variables go into the
  // outer local scope, the rest into the fresh execution scope.
  for (auto it = member_->local_scopes_.rbegin();
       it != member_->local_scopes_.rend(); ++it) {
    auto &scope = *it;
    Scope &local_scope = scope->NewScope();
    *scope->Var(details::kLocalExecScopeName)->GetMutable<Scope *>() =
        &local_scope;

    for (auto &name_type_pair : member_->var_types_) {
      if (scope->FindVar(std::get<0>(name_type_pair)) != nullptr) {
        continue;
      }

      if (std::get<2>(name_type_pair)) {  // Persistable
        InitializeVariable(scope->Var(std::get<0>(name_type_pair)),
                           std::get<1>(name_type_pair));
      } else {
        InitializeVariable(local_scope.Var(std::get<0>(name_type_pair)),
                           std::get<1>(name_type_pair));
      }
    }
  }

  auto fetch_data = member_->executor_->Run(fetch_tensors);
  *member_->global_scope_->Var(fetched_var_name)->GetMutable<FeedFetchList>() =
      fetch_data;

  // Wait for all computation streams to finish.
  for (auto p : member_->places_) {
    platform::DeviceContextPool::Instance().Get(p)->Wait();
  }
  for (auto &scope : member_->local_scopes_) {
    auto &local_scope =
        *scope->Var(details::kLocalExecScopeName)->GetMutable<Scope *>();
    scope->DeleteScope(local_scope);
  }
}

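// Feed already-split input: tensors[i] holds the input tensors for device i
// and is shared (not copied) into that device's local scope.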
void ParallelExecutor::FeedTensorsIntoLocalScopes(
    const std::vector<std::unordered_map<std::string, LoDTensor>> &tensors) {
  PADDLE_ENFORCE_EQ(member_->local_scopes_.size(), tensors.size());

  for (size_t i = 0; i < tensors.size(); ++i) {
    auto &map = tensors[i];
    auto *scope = member_->local_scopes_[i];
    for (auto &pair : map) {
      auto *trg = scope->Var(pair.first)->GetMutable<LoDTensor>();
      trg->ShareDataWith(pair.second);
      trg->set_lod(pair.second.lod());
    }
  }
}

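// Feed a whole batch: every input tensor is split across the devices with
// SplitLoDTensor and shared into the corresponding local scope.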
void ParallelExecutor::FeedAndSplitTensorIntoLocalScopes(
    const std::unordered_map<std::string, LoDTensor> &tensors) {
  for (auto pair : tensors) {
    auto lod_tensors = pair.second.SplitLoDTensor(member_->places_);
    PADDLE_ENFORCE_EQ(
        member_->places_.size(), lod_tensors.size(),
        "The number of samples of current batch is less than the count of "
        "devices, currently, it is not allowed. (%d vs %d)",
        member_->places_.size(), lod_tensors.size());
    for (size_t j = 0; j < member_->places_.size(); ++j) {
      // TODO(panxy0718): Do I need to delete this var?
      auto t =
          member_->local_scopes_[j]->Var(pair.first)->GetMutable<LoDTensor>();
      t->ShareDataWith(lod_tensors[j]);
      t->set_lod(lod_tensors[j].lod());
    }
  }
}

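// Only delete local scopes that were created by this executor; scopes that
// were passed in from outside are left to their owner.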
ParallelExecutor::~ParallelExecutor() {
  if (member_->own_local_scope) {
    for (size_t i = 1; i < member_->local_scopes_.size(); ++i) {
      member_->global_scope_->DeleteScope(member_->local_scopes_[i]);
    }
  }
}

}  // namespace framework
}  // namespace paddle