/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/framework/parallel_executor.h"
#include <string>
#include <tuple>
#include <vector>
#include "paddle/fluid/framework/ir/graph_helper.h"

#include "paddle/fluid/framework/ir/graph.h"

#ifdef PADDLE_WITH_CUDA
#include "paddle/fluid/platform/nccl_helper.h"
#endif

#include "paddle/fluid/framework/details/fast_threaded_ssa_graph_executor.h"
#include "paddle/fluid/framework/details/multi_devices_helper.h"
#include "paddle/fluid/framework/details/scope_buffered_ssa_graph_executor.h"
#include "paddle/fluid/framework/details/threaded_ssa_graph_executor.h"
#include "paddle/fluid/platform/profiler.h"

namespace paddle {
namespace framework {

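// ParallelExecutorPrivate holds the executor's internal state: the device
// places, the per-device local scopes, the underlying SSA graph executor and,
// when built with CUDA, the NCCL context map used for broadcast/all-reduce.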
class ParallelExecutorPrivate {
 public:
  explicit ParallelExecutorPrivate(const std::vector<platform::Place> &places)
      : places_(places) {}

  std::vector<platform::Place> places_;
  std::vector<Scope *> local_scopes_;
  Scope *global_scope_;
  std::unique_ptr<details::SSAGraphExecutor> executor_;

#ifdef PADDLE_WITH_CUDA
  std::unique_ptr<platform::NCCLContextMap> nccl_ctxs_;
#endif
  bool own_local_scope_;
  bool use_cuda_;
  bool use_all_reduce_;
};

std::vector<Scope *> &ParallelExecutor::GetLocalScopes() {
  return member_->local_scopes_;
}
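
// A minimal usage sketch (hypothetical names, for illustration only): build
// one ParallelExecutor over two CUDA places and run it, assuming `params`,
// `bcast_vars`, `main_program`, and `scope` already exist.
//
//   std::vector<platform::Place> places = {platform::CUDAPlace(0),
//                                          platform::CUDAPlace(1)};
//   ParallelExecutor pe(places, params, bcast_vars, main_program,
//                       "loss_0.tmp_0", &scope, /*local_scopes=*/{},
//                       ExecutionStrategy(), BuildStrategy(),
//                       /*num_trainers=*/1, /*trainer_id=*/0);
//   pe.Run({"loss_0.tmp_0"}, "fetched_var_name");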

ParallelExecutor::ParallelExecutor(
    const std::vector<platform::Place> &places,
    const std::unordered_set<std::string> &params,
    const std::unordered_set<std::string> &bcast_vars,
    const ProgramDesc &main_program, const std::string &loss_var_name,
    Scope *scope, const std::vector<Scope *> &local_scopes,
    const ExecutionStrategy &exec_strategy, const BuildStrategy &build_strategy,
    size_t num_trainers, size_t trainer_id)
    : member_(new ParallelExecutorPrivate(places)) {
  member_->global_scope_ = scope;
  member_->use_cuda_ = exec_strategy.use_cuda_;
  member_->use_all_reduce_ =
      build_strategy.reduce_ == BuildStrategy::ReduceStrategy::kAllReduce;

  if (!member_->use_all_reduce_) {
    PADDLE_ENFORCE(places.size() > 1,
                   "If you set build_strategy.reduce with 'Reduce', "
                   "the number of places must be greater than 1.");
  }

  // Step 1. Broadcast the parameters to the devices.
  // Create local scopes.
  if (local_scopes.empty()) {
    member_->own_local_scope_ = true;
    member_->local_scopes_.emplace_back(member_->global_scope_);
    for (size_t i = 1; i < member_->places_.size(); ++i) {
      member_->local_scopes_.emplace_back(&scope->NewScope());
    }
  } else {
    member_->own_local_scope_ = false;
    PADDLE_ENFORCE_EQ(member_->places_.size(), local_scopes.size());
    for (size_t i = 0; i < member_->places_.size(); ++i) {
      member_->local_scopes_.emplace_back(&local_scopes[i]->NewScope());
    }
  }

  if (member_->use_cuda_) {
// Broadcast parameters to all GPUs
#ifdef PADDLE_WITH_CUDA
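    // A preexisting ncclUniqueId in the scope (typically set for
    // multi-trainer runs) is reused so that all trainers join the same NCCL
    // communicator; when it is absent, NCCLContextMap sets up the
    // communicators on its own.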
    auto *nccl_id_var = scope->FindVar(NCCL_ID_VARNAME);
    ncclUniqueId *nccl_id = nullptr;
    if (nccl_id_var != nullptr) {
      nccl_id = nccl_id_var->GetMutable<ncclUniqueId>();
    }
    member_->nccl_ctxs_.reset(new platform::NCCLContextMap(
        member_->places_, nccl_id, num_trainers, trainer_id));
#else
    PADDLE_THROW("Not compiled with CUDA");
#endif
  }

  if (member_->local_scopes_.size() != 1 && local_scopes.empty()) {
    BCastParamsToDevices(bcast_vars);
  }
// Startup Program has been run. All local scopes have correct parameters.

// Step 2. Convert main_program to SSA form and dependency graph. Also, insert
// ncclOp
#ifdef PADDLE_WITH_CUDA
  std::unique_ptr<ir::Graph> graph = build_strategy.Apply(
      main_program, member_->places_, loss_var_name, params,
      member_->local_scopes_, member_->use_cuda_, member_->nccl_ctxs_.get());

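  // Optional eager deletion: when a memory threshold is configured, attach a
  // per-GPU stream garbage collector and reference-count maps, which the
  // reference_count_pass below uses to free variables as soon as their last
  // reference is gone.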
  auto max_memory_size = GetEagerDeletionThreshold();
  if (max_memory_size >= 0) {
    for (auto &place : member_->places_) {
      if (!platform::is_gpu_place(place)) continue;
      auto gpu_place = boost::get<platform::CUDAPlace>(place);
      if (gcs_[gpu_place.device] == nullptr) {
        ref_cnts_[gpu_place.device].reset(new details::ReferenceCountMap());
        cur_ref_cnts_[gpu_place.device].reset(
            new details::AtomicReferenceCountMap());
        gcs_[gpu_place.device].reset(
            new StreamGarbageCollector<Tensor>(gpu_place, max_memory_size));
      }
    }
    if (!gcs_.empty()) {
      auto ref_cnt_pass =
          ir::PassRegistry::Instance().Get("reference_count_pass");
      ref_cnt_pass->SetNotOwned(details::kGlobalReferenceCount, &ref_cnts_);
      ref_cnt_pass->SetNotOwned(details::kCurReferenceCount, &cur_ref_cnts_);
      ref_cnt_pass->SetNotOwned(details::kGarbageCollector, &gcs_);
      graph = ref_cnt_pass->Apply(std::move(graph));
      graph->SetNotOwned("garbage_collector", &gcs_);
    }
  }
#else
  std::unique_ptr<ir::Graph> graph =
      build_strategy.Apply(main_program, member_->places_, loss_var_name,
                           params, member_->local_scopes_, member_->use_cuda_);
#endif

  // Step 3. Create vars in each scope. Passes may also create new vars.
  //         skip control vars and empty vars
  std::vector<details::VariableInfo> var_infos;
  for (auto &node : graph->Nodes()) {
    if (node->IsVar() && !node->IsCtrlVar() && node->Var()) {
      var_infos.emplace_back();
      var_infos.back().name_ = node->Var()->Name();
      var_infos.back().type_ = node->Var()->GetType();
      var_infos.back().persistable_ = node->Var()->Persistable();
    }
  }

  if (VLOG_IS_ON(5)) {
    // If loss_var_name is given, there should be exactly one graph.
    if (loss_var_name.size()) {
      PADDLE_ENFORCE_EQ(ir::GraphNum(*graph), 1,
                        "The number of graphs should be only one");
    }
  }

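  // Choose the underlying executor: the default execution strategy runs the
  // ThreadedSSAGraphExecutor, otherwise the FastThreadedSSAGraphExecutor.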
  if (exec_strategy.type_ == ExecutionStrategy::kDefault) {
    member_->executor_.reset(new details::ThreadedSSAGraphExecutor(
        exec_strategy, member_->local_scopes_, places, std::move(graph)));
  } else {
    member_->executor_.reset(new details::FastThreadedSSAGraphExecutor(
        exec_strategy, member_->local_scopes_, places, std::move(graph)));
  }

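  // Wrap the chosen executor so that per-iteration local scope variables are
  // buffered and cleaned up between runs.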
  member_->executor_.reset(new details::ScopeBufferedSSAGraphExecutor(
      exec_strategy, member_->local_scopes_, std::move(var_infos),
      member_->places_, std::move(member_->executor_)));
}

void ParallelExecutor::BCastParamsToDevices(
    const std::unordered_set<std::string> &vars) const {
  // This is the initializing broadcast; all variables are broadcast from
  // device(0).
  for (auto &var : vars) {
    framework::Variable *main_var = member_->local_scopes_[0]->FindVar(var);
    if (main_var == nullptr || !main_var->IsType<LoDTensor>()) {
      continue;
    }

    auto &main_tensor = main_var->Get<LoDTensor>();
    if (!main_tensor.IsInitialized()) {
      VLOG(3) << "var " << var << " is not initialized, skip broadcast";
      continue;
    }
    auto &dims = main_tensor.dims();
    if (paddle::platform::is_gpu_place(main_tensor.place())) {
#ifdef PADDLE_WITH_CUDA
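      // Collect one buffer per device: device 0 contributes the already
      // initialized source tensor, every other device allocates an
      // uninitialized tensor of the same shape to receive the broadcast.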
      std::vector<void *> buffers;
      size_t numel = main_tensor.numel();
      ncclDataType_t data_type = platform::ToNCCLDataType(main_tensor.type());
      for (size_t i = 0; i < member_->places_.size(); ++i) {
        auto place = member_->places_[i];
        void *buffer;

        if (i == 0) {
          buffer = const_cast<void *>(main_tensor.data<void>());
        } else {
          auto local_scope = member_->local_scopes_[i];
          auto *t = local_scope->Var(var)->GetMutable<LoDTensor>();
          t->Resize(dims);
          buffer = t->mutable_data(place, main_tensor.type());
        }
        buffers.push_back(buffer);
      }

      PADDLE_ENFORCE_EQ(member_->places_.size(), buffers.size(),
                        "The number of buffers to broadcast does not match "
                        "the number of places");
      {
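        // Issue all per-device broadcasts inside one NCCL group call, then
        // block until every device's stream has drained.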
        platform::NCCLGroupGuard guard;
        for (size_t i = 0; i < member_->places_.size(); ++i) {
          auto &nccl_ctx = member_->nccl_ctxs_->at(member_->places_[i]);
          platform::dynload::ncclBcast(buffers[i], numel, data_type, 0,
                                       nccl_ctx.comm_, nccl_ctx.stream());
        }
        member_->nccl_ctxs_->WaitAll();
      }
#else
      PADDLE_THROW("Not compiled with CUDA");
#endif
    } else {
      platform::CPUPlace cpu;
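      // On CPU places, propagate the parameter from scope 0 into every other
      // local scope, copying the tensor or sharing its buffer depending on
      // the strategy below.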
      for (size_t i = 0; i < member_->places_.size(); ++i) {
        if (i == 0) continue;

        auto local_scope = member_->local_scopes_[i];
        auto *t = local_scope->Var(var)->GetMutable<LoDTensor>();

        // FIXME(zcd): LR_DECAY_COUNTER should not be shared. This is a hot fix.
        if (member_->use_all_reduce_ || member_->use_cuda_ ||
            var == "@LR_DECAY_COUNTER@") {
          t->Resize(dims);
          t->mutable_data(cpu, main_tensor.type());
          paddle::framework::TensorCopy(main_tensor, cpu, t);
        } else {
          t->ShareDataWith(main_tensor);
        }
      }
    }
  }
}

void ParallelExecutor::Run(const std::vector<std::string> &fetch_tensors,
                           const std::string &fetched_var_name) {
  platform::RecordBlock b(0);
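// Reset the per-iteration reference counts, and remove the fetched variables
// from the count maps so that the garbage collector never frees a variable
// the caller is about to read.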
#ifdef PADDLE_WITH_CUDA
  if (!gcs_.empty()) {
    ResetReferenceCount();
    for (auto &pair : cur_ref_cnts_) {
      auto &name_map = *(pair.second);
      for (auto &fetch_name : fetch_tensors) {
        name_map.erase(fetch_name);
      }
      name_map.erase(fetched_var_name);
    }
  }
#endif
  auto fetch_data = member_->executor_->Run(fetch_tensors);
  *member_->global_scope_->Var(fetched_var_name)->GetMutable<FeedFetchList>() =
      fetch_data;
}

void ParallelExecutor::FeedTensorsIntoLocalScopes(
    const std::vector<std::unordered_map<std::string, LoDTensor>> &tensors) {
  PADDLE_ENFORCE_EQ(member_->local_scopes_.size(), tensors.size());

  for (size_t i = 0; i < tensors.size(); ++i) {
    auto &map = tensors[i];
    auto *scope = member_->local_scopes_[i];
    for (auto &pair : map) {
      auto *trg = scope->Var(pair.first)->GetMutable<LoDTensor>();
      trg->ShareDataWith(pair.second);
      trg->set_lod(pair.second.lod());
    }
  }
}

void ParallelExecutor::FeedAndSplitTensorIntoLocalScopes(
    const std::unordered_map<std::string, LoDTensor> &tensors) {
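  // Split each fed tensor along the batch dimension and place one shard into
  // each device's local scope.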
  for (auto pair : tensors) {
    auto lod_tensors = pair.second.SplitLoDTensor(member_->places_);
    PADDLE_ENFORCE_EQ(
        member_->places_.size(), lod_tensors.size(),
        "The number of samples in the current batch is less than the number "
        "of devices; currently this is not allowed. (%d vs %d)",
        member_->places_.size(), lod_tensors.size());
    for (size_t j = 0; j < member_->places_.size(); ++j) {
      // TODO(panxy0718): Do I need to delete this var?
      auto t =
          member_->local_scopes_[j]->Var(pair.first)->GetMutable<LoDTensor>();
      t->ShareDataWith(lod_tensors[j]);
      t->set_lod(lod_tensors[j].lod());
    }
  }
}

ParallelExecutor::~ParallelExecutor() {
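  // Wait for all in-flight work on every device to finish before tearing
  // down the local scopes.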
  const auto dev_ctxs =
      platform::DeviceContextPool::Instance().GetAllDeviceContexts();
  for (auto &dev_ctx : dev_ctxs) {
    dev_ctx->Wait();
  }

  if (member_->own_local_scope_) {
    for (size_t i = 1; i < member_->local_scopes_.size(); ++i) {
      Scope *local_scope = member_->local_scopes_[i];
      if (member_->global_scope_->HasKid(local_scope)) {
        member_->global_scope_->DeleteScope(local_scope);
      }
    }
  }

  // member_ must be destructed before gcs_ since the destructor of
  // ReferenceCountOpHandle uses raw pointers of gcs_ inside.
  member_.reset();
}

}  // namespace framework
}  // namespace paddle
#ifdef PADDLE_WITH_CUDA
USE_PASS(reference_count_pass);
#endif