/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/framework/parallel_executor.h"

#include <string>
#include <tuple>
#include <vector>

#ifdef PADDLE_WITH_CUDA
#include "paddle/fluid/platform/nccl_helper.h"
#endif

#include "paddle/fluid/framework/details/multi_devices_graph_builder.h"
#include "paddle/fluid/framework/details/threaded_ssa_graph_executor.h"
#include "paddle/fluid/platform/profiler.h"

namespace paddle {
namespace framework {

class ParallelExecutorPrivate {
 public:
  explicit ParallelExecutorPrivate(const std::vector<platform::Place> &places)
      : places_(places) {}

  std::vector<platform::Place> places_;
  std::vector<Scope *> local_scopes_;
  Scope *global_scope_;
  std::unique_ptr<details::SSAGraphExecutor> executor_;

#ifdef PADDLE_WITH_CUDA
  std::unique_ptr<platform::NCCLContextMap> nccl_ctxs_;
#endif

  std::vector<std::tuple<std::string, proto::VarType::Type, bool>> var_types_;
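  // True when local_scopes_ was created by this executor rather than supplied
  // by the caller; checked before deleting the scopes in the destructor.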
  bool own_local_scope;
};

std::vector<Scope *> &ParallelExecutor::GetLocalScopes() {
  return member_->local_scopes_;
}

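// A rough usage sketch (the names "image", "loss" and "fetched_vars" below are
// illustrative only, not part of this API):
//
//   ParallelExecutor exe(places, params, bcast_vars, main_program,
//                        "loss", scope, {}, exec_strategy, build_strategy);
//   exe.FeedAndSplitTensorIntoLocalScopes({{"image", batch_tensor}});
//   exe.Run({"loss"}, "fetched_vars");
//   auto &results = scope->FindVar("fetched_vars")->Get<FeedFetchList>();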
ParallelExecutor::ParallelExecutor(
    const std::vector<platform::Place> &places,
    const std::unordered_set<std::string> &params,
    const std::unordered_set<std::string> &bcast_vars,
    const ProgramDesc &main_program, const std::string &loss_var_name,
    Scope *scope, const std::vector<Scope *> &local_scopes,
    const ExecutionStrategy &exec_strategy, const BuildStrategy &build_strategy)
    : member_(new ParallelExecutorPrivate(places)) {
  member_->global_scope_ = scope;

  // Step 1. Bcast the params to devs.
  // Create local scopes
  if (local_scopes.empty()) {
    member_->own_local_scope = true;
    member_->local_scopes_.emplace_back(member_->global_scope_);
    for (size_t i = 1; i < member_->places_.size(); ++i) {
      member_->local_scopes_.emplace_back(&scope->NewScope());
    }
  } else {
    member_->own_local_scope = false;
    PADDLE_ENFORCE_EQ(member_->places_.size(), local_scopes.size());
    for (size_t i = 0; i < member_->places_.size(); ++i) {
      member_->local_scopes_.emplace_back(&local_scopes[i]->NewScope());
    }
  }

// Bcast Parameters to all GPUs
#ifdef PADDLE_WITH_CUDA
  member_->nccl_ctxs_.reset(new platform::NCCLContextMap(member_->places_));
#endif
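  // Parameters are broadcast from device 0 only when running on GPU with more
  // than one device and when the local scopes were created here; caller
  // provided scopes are assumed to already hold the parameters.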
  if (platform::is_gpu_place(places[0]) && member_->local_scopes_.size() != 1 &&
      local_scopes.empty()) {  // Is CUDA
    BCastParamsToGPUs(bcast_vars);
  }
// The startup program has already been run, so every local scope holds the
// correct parameters.

// Step 2. Convert main_program to SSA form and dependency graph. Also, insert
// ncclOp
#ifdef PADDLE_WITH_CUDA
  details::MultiDevSSAGraphBuilder builder(
      member_->places_, loss_var_name, params, member_->local_scopes_,
      member_->nccl_ctxs_.get(), build_strategy);
#else
  details::MultiDevSSAGraphBuilder builder(member_->places_, loss_var_name,
                                           params, member_->local_scopes_,
                                           build_strategy);
#endif
  auto graph = builder.Build(main_program);

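  // The threaded executor schedules the ops of the SSA graph across the given
  // places according to exec_strategy.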
  member_->executor_.reset(new details::ThreadedSSAGraphExecutor(
      exec_strategy, member_->local_scopes_, places, std::move(graph)));

  // Step 3. Record the name, type and persistability of every variable in
  // block 0; the variables themselves are created in each scope inside Run().
  for (auto *var : main_program.Block(0).AllVars()) {
    member_->var_types_.emplace_back(var->Name(), var->GetType(),
                                     var->Persistable());
  }
}

void ParallelExecutor::BCastParamsToGPUs(
    const std::unordered_set<std::string> &vars) const {
#ifdef PADDLE_WITH_CUDA
  auto *main_scope = member_->local_scopes_[0];

  for (auto &var : vars) {
    auto *main_var = main_scope->FindVar(var);
    if (main_var == nullptr || !main_var->IsType<LoDTensor>()) {
      continue;
    }

    auto &main_tensor = main_var->Get<LoDTensor>();
    auto &dims = main_tensor.dims();
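    // On GPU, the tensor on device 0 is broadcast to every other device with a
    // single grouped ncclBcast; on CPU, it is simply copied into every other
    // local scope.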
    if (paddle::platform::is_gpu_place(main_tensor.place())) {
      size_t numel = main_tensor.numel();
      ncclDataType_t data_type = platform::ToNCCLDataType(main_tensor.type());
      platform::NCCLGroupGuard guard;
      for (size_t i = 0; i < member_->places_.size(); ++i) {
        auto place = member_->places_[i];
        void *buffer;
        if (i == 0) {
          buffer = const_cast<void *>(main_tensor.data<void>());
        } else {
          auto local_scope = member_->local_scopes_[i];
          auto *t = local_scope->Var(var)->GetMutable<LoDTensor>();
          t->Resize(dims);
          buffer = t->mutable_data(place, main_tensor.type());
        }
        auto &nccl_ctx = member_->nccl_ctxs_->at(place);
        platform::dynload::ncclBcast(buffer, numel, data_type, 0,
                                     nccl_ctx.comm_, nccl_ctx.stream());
      }
    } else {
      platform::CPUPlace cpu;
      for (size_t i = 1; i < member_->places_.size(); ++i) {
        auto local_scope = member_->local_scopes_[i];
        auto *t = local_scope->Var(var)->GetMutable<LoDTensor>();
        t->Resize(dims);
        t->mutable_data(cpu, main_tensor.type());
        paddle::framework::TensorCopy(main_tensor, cpu, t);
      }
    }
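    // Wait for the broadcast streams on all devices to finish before moving on
    // to the next variable.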
    member_->nccl_ctxs_->WaitAll();
  }
#else
  PADDLE_THROW("Not compiled with CUDA");
#endif
}

void ParallelExecutor::Run(const std::vector<std::string> &fetch_tensors,
                           const std::string &fetched_var_name) {
  platform::RecordBlock b(0);
  // Create local scopes.
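  // For every device scope, create a fresh execution scope for this run and
  // remember it under kLocalExecScopeName. Persistable variables are created
  // in the device scope itself; everything else goes into the per-run scope,
  // which is deleted again at the end of Run().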
  for (auto it = member_->local_scopes_.rbegin();
       it != member_->local_scopes_.rend(); ++it) {
    auto &scope = *it;
    Scope &local_scope = scope->NewScope();
    *scope->Var(details::kLocalExecScopeName)->GetMutable<Scope *>() =
        &local_scope;

    for (auto &name_type_pair : member_->var_types_) {
      if (scope->FindVar(std::get<0>(name_type_pair)) != nullptr) {
        continue;
      }

      if (std::get<2>(name_type_pair)) {  // Persistable
        InitializeVariable(scope->Var(std::get<0>(name_type_pair)),
                           std::get<1>(name_type_pair));
      } else {
        InitializeVariable(local_scope.Var(std::get<0>(name_type_pair)),
                           std::get<1>(name_type_pair));
      }
    }
  }

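  // Run the SSA graph; the fetched tensors are gathered into a FeedFetchList
  // stored in the global scope under fetched_var_name.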
  auto fetch_data = member_->executor_->Run(fetch_tensors);
  *member_->global_scope_->Var(fetched_var_name)->GetMutable<FeedFetchList>() =
      fetch_data;

  // Wait for all computation streams on every device to finish.
  for (auto p : member_->places_) {
    platform::DeviceContextPool::Instance().Get(p)->Wait();
  }
  for (auto &scope : member_->local_scopes_) {
    auto &local_scope =
        *scope->Var(details::kLocalExecScopeName)->GetMutable<Scope *>();
    scope->DeleteScope(local_scope);
  }
}

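// Feed inputs that are already split per device: tensors[i] is the feed map
// for the i-th device scope, and the fed tensors share data with the inputs.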
void ParallelExecutor::FeedTensorsIntoLocalScopes(
    const std::vector<std::unordered_map<std::string, LoDTensor>> &tensors) {
  PADDLE_ENFORCE_EQ(member_->local_scopes_.size(), tensors.size());

  for (size_t i = 0; i < tensors.size(); ++i) {
    auto &map = tensors[i];
    auto *scope = member_->local_scopes_[i];
    for (auto &pair : map) {
      auto *trg = scope->Var(pair.first)->GetMutable<LoDTensor>();
      trg->ShareDataWith(pair.second);
      trg->set_lod(pair.second.lod());
    }
  }
}

void ParallelExecutor::FeedAndSplitTensorIntoLocalScopes(
    const std::unordered_map<std::string, LoDTensor> &tensors) {
  for (auto pair : tensors) {
    auto lod_tensors = pair.second.SplitLoDTensor(member_->places_);
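    // SplitLoDTensor slices the batch across the devices; every device must
    // receive at least one sample, which the check below enforces.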
    PADDLE_ENFORCE_EQ(
        member_->places_.size(), lod_tensors.size(),
        "The number of samples in the current batch is smaller than the "
        "number of devices; this is currently not allowed. (%d vs %d)",
        member_->places_.size(), lod_tensors.size());
    for (size_t j = 0; j < member_->places_.size(); ++j) {
      // TODO(panxy0718): Do I need to delete this var?
      auto t =
          member_->local_scopes_[j]->Var(pair.first)->GetMutable<LoDTensor>();
      t->ShareDataWith(lod_tensors[j]);
      t->set_lod(lod_tensors[j].lod());
    }
  }
}

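// Destroy only the scopes this executor created itself; index 0 is skipped
// because it aliases the caller's global scope.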
ParallelExecutor::~ParallelExecutor() {
  if (member_->own_local_scope) {
    for (size_t i = 1; i < member_->local_scopes_.size(); ++i) {
      member_->global_scope_->DeleteScope(member_->local_scopes_[i]);
    }
  }
}

}  // namespace framework
}  // namespace paddle