/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/framework/parallel_executor.h"

#include <string>
#include <tuple>
#include <vector>

#ifdef PADDLE_WITH_CUDA
#include "paddle/fluid/platform/nccl_helper.h"
#endif

#include "paddle/fluid/framework/details/multi_devices_graph_builder.h"
#include "paddle/fluid/framework/details/threaded_ssa_graph_executor.h"
#include "paddle/fluid/platform/profiler.h"

namespace paddle {
namespace framework {

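// Internal state of ParallelExecutor: the target places, one local scope per
// place, the SSA graph executor, the NCCL contexts (CUDA builds only), and the
// (name, type, persistable) info of block-0 variables used by Run().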
class ParallelExecutorPrivate {
 public:
  explicit ParallelExecutorPrivate(const std::vector<platform::Place> &places)
      : places_(places) {}

  std::vector<platform::Place> places_;
  std::vector<Scope *> local_scopes_;
  Scope *global_scope_;
  std::unique_ptr<details::SSAGraphExecutor> executor_;

#ifdef PADDLE_WITH_CUDA
  std::unique_ptr<platform::NCCLContextMap> nccl_ctxs_;
#endif

  std::vector<std::tuple<std::string, proto::VarType::Type, bool>> var_types_;
};

std::vector<Scope *> &ParallelExecutor::GetLocalScopes() {
  return member_->local_scopes_;
}

ParallelExecutor::ParallelExecutor(
    size_t num_threads, bool use_event,
    const std::vector<platform::Place> &places,
    const std::unordered_set<std::string> &params,
    const std::unordered_set<std::string> &bcast_vars,
    const ProgramDesc &main_program, const std::string &loss_var_name,
    Scope *scope, const std::vector<Scope *> &local_scopes, bool allow_op_delay)
    : member_(new ParallelExecutorPrivate(places)) {
  member_->global_scope_ = scope;

  // Step 1. Bcast the params to devs.
  // Create local scopes
  if (local_scopes.empty()) {
    member_->local_scopes_.emplace_back(member_->global_scope_);
    for (size_t i = 1; i < member_->places_.size(); ++i) {
      member_->local_scopes_.emplace_back(&scope->NewScope());
    }
  } else {
    PADDLE_ENFORCE_EQ(member_->places_.size(), local_scopes.size());
    for (size_t i = 0; i < member_->places_.size(); ++i) {
      member_->local_scopes_.emplace_back(local_scopes[i]);
    }
  }

// Bcast Parameters to all GPUs
#ifdef PADDLE_WITH_CUDA
  member_->nccl_ctxs_.reset(new platform::NCCLContextMap(member_->places_));
#endif
  if (platform::is_gpu_place(places[0]) && member_->local_scopes_.size() != 1 &&
      local_scopes.empty()) {  // Is CUDA
    BCastParamsToGPUs(bcast_vars);
  }
// Startup Program has been run. All local scopes have correct parameters.

// Step 2. Convert main_program to SSA form and dependency graph. Also, insert
// ncclOp
#ifdef PADDLE_WITH_CUDA
  details::MultiDevSSAGraphBuilder builder(member_->places_, loss_var_name,
                                           params, member_->local_scopes_,
                                           member_->nccl_ctxs_.get());
#else
  details::MultiDevSSAGraphBuilder builder(member_->places_, loss_var_name,
                                           params, member_->local_scopes_);
#endif
  auto graph = builder.Build(main_program);

  member_->executor_.reset(new details::ThreadedSSAGraphExecutor(
      num_threads, use_event, member_->local_scopes_, places, std::move(graph),
      allow_op_delay));

  // Step 3. Collect the vars of block 0; they are created in each scope in Run().
  for (auto *var : main_program.Block(0).AllVars()) {
    member_->var_types_.emplace_back(var->Name(), var->GetType(),
                                     var->Persistable());
  }
}

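// Broadcast each variable in `vars` from the scope of places_[0] to every
// other local scope: an ncclBcast for tensors on GPU places, a TensorCopy for
// tensors on CPU.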
void ParallelExecutor::BCastParamsToGPUs(
    const std::unordered_set<std::string> &vars) const {
#ifdef PADDLE_WITH_CUDA
  auto *main_scope = member_->local_scopes_[0];

  for (auto &var : vars) {
    auto *main_var = main_scope->FindVar(var);
    if (main_var == nullptr || !main_var->IsType<LoDTensor>()) {
      continue;
    }

    auto &main_tensor = main_var->Get<LoDTensor>();
    auto &dims = main_tensor.dims();
    if (paddle::platform::is_gpu_place(main_tensor.place())) {
      size_t numel = main_tensor.numel();
      ncclDataType_t data_type = platform::ToNCCLDataType(main_tensor.type());
      platform::NCCLGroupGuard guard;
      for (size_t i = 0; i < member_->places_.size(); ++i) {
        auto place = member_->places_[i];
        void *buffer;
        if (i == 0) {
          buffer = const_cast<void *>(main_tensor.data<void>());
        } else {
          auto local_scope = member_->local_scopes_[i];
          auto *t = local_scope->Var(var)->GetMutable<LoDTensor>();
          t->Resize(dims);
          buffer = t->mutable_data(place, main_tensor.type());
        }
        auto &nccl_ctx = member_->nccl_ctxs_->at(place);
        platform::dynload::ncclBcast(buffer, numel, data_type, 0,
                                     nccl_ctx.comm_, nccl_ctx.stream());
      }
    } else {
      platform::CPUPlace cpu;
      for (size_t i = 1; i < member_->places_.size(); ++i) {
        auto local_scope = member_->local_scopes_[i];
        auto *t = local_scope->Var(var)->GetMutable<LoDTensor>();
        t->Resize(dims);
        t->mutable_data(cpu, main_tensor.type());
        paddle::framework::TensorCopy(main_tensor, cpu, t);
      }
    }
    member_->nccl_ctxs_->WaitAll();
  }
#else
  PADDLE_THROW("Not compiled with CUDA");
#endif
}

void ParallelExecutor::Run(const std::vector<std::string> &fetch_tensors,
                           const std::string &fetched_var_name) {
  platform::RecordBlock b(0);
  // Create local scopes.
  for (auto it = member_->local_scopes_.rbegin();
       it != member_->local_scopes_.rend(); ++it) {
    auto &scope = *it;
    Scope &local_scope = scope->NewScope();
    *scope->Var(details::kLocalExecScopeName)->GetMutable<Scope *>() =
        &local_scope;

    for (auto &name_type_pair : member_->var_types_) {
      if (scope->FindVar(std::get<0>(name_type_pair)) != nullptr) {
        continue;
      }

      if (std::get<2>(name_type_pair)) {  // Persistable
        InitializeVariable(scope->Var(std::get<0>(name_type_pair)),
                           std::get<1>(name_type_pair));
      } else {
        InitializeVariable(local_scope.Var(std::get<0>(name_type_pair)),
                           std::get<1>(name_type_pair));
      }
    }
  }

  auto fetch_data = member_->executor_->Run(fetch_tensors);
  *member_->global_scope_->Var(fetched_var_name)->GetMutable<FeedFetchList>() =
      fetch_data;

  // Wait for all computational streams.
  for (auto p : member_->places_) {
    platform::DeviceContextPool::Instance().Get(p)->Wait();
  }
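  // Drop the per-iteration execution scopes created at the top of Run().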
  for (auto &scope : member_->local_scopes_) {
    auto &local_scope =
        *scope->Var(details::kLocalExecScopeName)->GetMutable<Scope *>();
    scope->DeleteScope(local_scope);
  }
}

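// Feed tensors that have already been split per device: each tensor in
// tensors[i] is shared (not copied) into the i-th local scope.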
void ParallelExecutor::FeedTensorsIntoLocalScopes(
    const std::vector<std::unordered_map<std::string, LoDTensor>> &tensors) {
  PADDLE_ENFORCE_EQ(member_->local_scopes_.size(), tensors.size());

  for (size_t i = 0; i < tensors.size(); ++i) {
    auto &map = tensors[i];
    auto *scope = member_->local_scopes_[i];
    for (auto &pair : map) {
      auto *trg = scope->Var(pair.first)->GetMutable<LoDTensor>();
      trg->ShareDataWith(pair.second);
      trg->set_lod(pair.second.lod());
    }
  }
}

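// Feed whole-batch tensors: each one is split across places_ with
// SplitLoDTensor and every piece is shared into its device's local scope.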
void ParallelExecutor::FeedAndSplitTensorIntoLocalScopes(
    const std::unordered_map<std::string, LoDTensor> &tensors) {
  for (auto pair : tensors) {
    auto lod_tensors = pair.second.SplitLoDTensor(member_->places_);
    PADDLE_ENFORCE_EQ(
        member_->places_.size(), lod_tensors.size(),
        "The number of samples of current batch is less than the count of "
        "devices, currently, it is not allowed. (%d vs %d)",
        member_->places_.size(), lod_tensors.size());
    for (size_t j = 0; j < member_->places_.size(); ++j) {
      // TODO(panxy0718): Do I need to delete this var?
      auto t =
          member_->local_scopes_[j]->Var(pair.first)->GetMutable<LoDTensor>();
      t->ShareDataWith(lod_tensors[j]);
      t->set_lod(lod_tensors[j].lod());
    }
  }
}

}  // namespace framework
}  // namespace paddle