/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/framework/parallel_executor.h"
#include <string>
#include <tuple>
#include <vector>
#include "paddle/fluid/framework/ir/graph_helper.h"

#include "paddle/fluid/framework/ir/graph.h"

#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32)
#include "paddle/fluid/platform/nccl_helper.h"
#endif

#include "paddle/fluid/framework/details/fast_threaded_ssa_graph_executor.h"
#include "paddle/fluid/framework/details/multi_devices_helper.h"
#include "paddle/fluid/framework/details/scope_buffered_ssa_graph_executor.h"
#include "paddle/fluid/framework/details/threaded_ssa_graph_executor.h"
#include "paddle/fluid/platform/profiler.h"

#ifdef WITH_GPERFTOOLS
#include "gperftools/profiler.h"
#endif
DEFINE_string(pe_profile_fname, "",
              "Profiler filename for PE, generated by gperftools. Only valid "
              "when compiled with `WITH_PROFILER=ON`. Leave empty to disable.");

namespace paddle {
namespace framework {

static std::once_flag gProfileOnce;
#ifdef WITH_GPERFTOOLS
static bool gProfileStarted = false;
#endif
class ParallelExecutorPrivate {
 public:
  explicit ParallelExecutorPrivate(const std::vector<platform::Place> &places)
      : places_(places) {
    if (!FLAGS_pe_profile_fname.empty()) {
      std::call_once(gProfileOnce, [] {
#ifdef WITH_GPERFTOOLS
        ProfilerStart(FLAGS_pe_profile_fname.c_str());
        gProfileStarted = true;
#else
        LOG(WARNING) << "Paddle is not compiled with gperftools. "
                        "FLAGS_pe_profile_fname will be ignored";
#endif
      });
    }
  }

  ~ParallelExecutorPrivate() {
    if (own_local_scope_) {
      for (size_t i = 1; i < local_scopes_.size(); ++i) {
        // Skip the first scope, since it is the global scope.
        Scope *local_scope = local_scopes_[i];
        if (global_scope_->HasKid(local_scope)) {
          global_scope_->DeleteScope(local_scope);
        }
      }
    }
  }
  std::vector<platform::Place> places_;
  std::vector<Scope *> local_scopes_;
  Scope *global_scope_;  // not owned
  std::unique_ptr<details::SSAGraphExecutor> executor_;

#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32)
  std::unique_ptr<platform::NCCLContextMap> nccl_ctxs_;
#endif
  bool own_local_scope_;
  bool use_cuda_;
  bool use_all_reduce_;
};

std::vector<Scope *> &ParallelExecutor::GetLocalScopes() {
  return member_->local_scopes_;
}

ParallelExecutor::ParallelExecutor(
    const std::vector<platform::Place> &places,
    const std::unordered_set<std::string> &params,
    const std::unordered_set<std::string> &bcast_vars,
    const ProgramDesc &main_program, const std::string &loss_var_name,
    Scope *scope, const std::vector<Scope *> &local_scopes,
    const ExecutionStrategy &exec_strategy, const BuildStrategy &build_strategy,
    size_t num_trainers, size_t trainer_id)
    : member_(new ParallelExecutorPrivate(places)) {
  member_->global_scope_ = scope;
  member_->use_cuda_ = exec_strategy.use_cuda_;
  member_->use_all_reduce_ =
      build_strategy.reduce_ == BuildStrategy::ReduceStrategy::kAllReduce;

  if (!member_->use_all_reduce_) {
    PADDLE_ENFORCE(places.size() > 1,
                   "If you set build_strategy.reduce to 'Reduce', "
                   "the number of places must be greater than 1.");
  }
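
  // Note: with kAllReduce every place keeps a full copy of each parameter
  // and gradients are all-reduced, while with kReduce each gradient is
  // reduced onto a single place and the updated parameter is broadcast
  // back, which is why kReduce needs more than one place.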

  // Step 1. Bcast the params to devs.
  // Create local scopes
  if (local_scopes.empty()) {
    member_->own_local_scope_ = true;
    member_->local_scopes_.emplace_back(member_->global_scope_);
    for (size_t i = 1; i < member_->places_.size(); ++i) {
      member_->local_scopes_.emplace_back(&scope->NewScope());
    }
  } else {
    member_->own_local_scope_ = false;
    PADDLE_ENFORCE_EQ(member_->places_.size(), local_scopes.size());
    for (size_t i = 0; i < member_->places_.size(); ++i) {
      member_->local_scopes_.emplace_back(&local_scopes[i]->NewScope());
    }
  }

  if (member_->use_cuda_) {
// Bcast Parameters to all GPUs
#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32)
    auto *nccl_id_var = scope->FindVar(NCCL_ID_VARNAME);
    ncclUniqueId *nccl_id = nullptr;
    if (nccl_id_var != nullptr) {
      nccl_id = nccl_id_var->GetMutable<ncclUniqueId>();
    }
    member_->nccl_ctxs_.reset(new platform::NCCLContextMap(
        member_->places_, nccl_id, num_trainers, trainer_id));
#else
    PADDLE_THROW("Not compiled with CUDA");
#endif
  }

  if (member_->local_scopes_.size() != 1 && local_scopes.empty()) {
    BCastParamsToDevices(bcast_vars);
  }
// The startup program has been run. All local scopes have correct parameters.

// Step 2. Convert main_program to SSA form and dependency graph. Also, insert
// ncclOp
#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32)
  std::unique_ptr<ir::Graph> graph = build_strategy.Apply(
      main_program, member_->places_, loss_var_name, params,
      member_->local_scopes_, member_->use_cuda_, member_->nccl_ctxs_.get());

  auto max_memory_size = GetEagerDeletionThreshold();
  if (max_memory_size >= 0) {
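    // A negative threshold disables eager deletion; otherwise one
    // stream-based garbage collector and reference-count map is attached
    // per CUDA place.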
    for (auto &place : member_->places_) {
      if (!platform::is_gpu_place(place)) continue;
      auto gpu_place = boost::get<platform::CUDAPlace>(place);
      if (gcs_[gpu_place.device] == nullptr) {
        ref_cnts_[gpu_place.device].reset(new details::ReferenceCountMap());
        cur_ref_cnts_[gpu_place.device].reset(
            new details::AtomicReferenceCountMap());
        gcs_[gpu_place.device].reset(
            new StreamGarbageCollector<Tensor>(gpu_place, max_memory_size));
      }
    }
    if (!gcs_.empty()) {
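      // The pass keeps raw pointers to these maps and collectors
      // (SetNotOwned), so member_ must be destroyed before gcs_; see the
      // note in ~ParallelExecutor().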
      auto ref_cnt_pass =
          ir::PassRegistry::Instance().Get("reference_count_pass");
      ref_cnt_pass->SetNotOwned(details::kGlobalReferenceCount, &ref_cnts_);
      ref_cnt_pass->SetNotOwned(details::kCurReferenceCount, &cur_ref_cnts_);
      ref_cnt_pass->SetNotOwned(details::kGarbageCollector, &gcs_);
      graph = ref_cnt_pass->Apply(std::move(graph));
      graph->SetNotOwned("garbage_collector", &gcs_);
    }
  }
#else
  std::unique_ptr<ir::Graph> graph =
      build_strategy.Apply(main_program, member_->places_, loss_var_name,
                           params, member_->local_scopes_, member_->use_cuda_);
#endif

  // Step 3. Create vars in each scope. Passes may also create new vars.
  //         skip control vars and empty vars
  std::vector<details::VariableInfo> var_infos;
  for (auto &node : graph->Nodes()) {
    if (node->IsVar() && !node->IsCtrlVar() && node->Var()) {
      var_infos.emplace_back();
      var_infos.back().name_ = node->Var()->Name();
      var_infos.back().type_ = node->Var()->GetType();
      var_infos.back().persistable_ = node->Var()->Persistable();
    }
  }
  // If loss_var_name is given, there should be exactly one graph.
  if (!loss_var_name.empty()) {
    size_t graph_num = ir::GraphNum(*graph);
    if (graph_num > 1) {
      LOG(WARNING)
          << "There should be only one graph, but the current graph has "
          << graph_num
          << " sub-graphs. To inspect the nodes of the sub-graphs, set "
             "'FLAGS_print_sub_graph_dir' to specify the output directory. "
             "NOTE: if you are not training, do not pass loss_var_name.";
    }
  }

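  // Choose the underlying graph executor (threaded scheduling by default,
  // otherwise the fast-threaded variant), then wrap it so local-scope
  // variables are prepared before, and flushed after, every Run().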
  if (exec_strategy.type_ == ExecutionStrategy::kDefault) {
    member_->executor_.reset(new details::ThreadedSSAGraphExecutor(
        exec_strategy, member_->local_scopes_, places, std::move(graph)));
  } else {
    member_->executor_.reset(new details::FastThreadedSSAGraphExecutor(
        exec_strategy, member_->local_scopes_, places, std::move(graph)));
  }

  member_->executor_.reset(new details::ScopeBufferedSSAGraphExecutor(
      exec_strategy, member_->local_scopes_, std::move(var_infos),
      member_->places_, std::move(member_->executor_)));
}
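
// A construction sketch (illustrative names; the argument order matches the
// constructor above):
//
//   std::vector<platform::Place> places = {platform::CUDAPlace(0),
//                                          platform::CUDAPlace(1)};
//   ParallelExecutor pe(places, params, bcast_vars, main_program,
//                       "loss_0.tmp_0", &global_scope,
//                       {} /* let PE create local scopes */, exec_strategy,
//                       build_strategy, /*num_trainers=*/1, /*trainer_id=*/0);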

void ParallelExecutor::BCastParamsToDevices(
    const std::unordered_set<std::string> &vars) const {
  // The initializing bcast: all vars are bcast from device(0).
  for (auto &var : vars) {
    framework::Variable *main_var = member_->local_scopes_[0]->FindVar(var);
    if (main_var == nullptr || !main_var->IsType<LoDTensor>()) {
      continue;
    }

    auto &main_tensor = main_var->Get<LoDTensor>();
    if (!main_tensor.IsInitialized()) {
      VLOG(3) << "Variable " << var << " is not initialized, skip bcast.";
      continue;
    }
    auto &dims = main_tensor.dims();
    if (paddle::platform::is_gpu_place(main_tensor.place())) {
#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32)
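      // Collect one buffer per place: place 0 contributes the initialized
      // tensor's data, every other place allocates an uninitialized tensor
      // of the same shape; a grouped ncclBcast then fills them all.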
      std::vector<void *> buffers;
      size_t numel = main_tensor.numel();
      ncclDataType_t data_type = platform::ToNCCLDataType(main_tensor.type());
      for (size_t i = 0; i < member_->places_.size(); ++i) {
        auto place = member_->places_[i];
        void *buffer;

        if (i == 0) {
          buffer = const_cast<void *>(main_tensor.data<void>());
        } else {
          auto local_scope = member_->local_scopes_[i];
          auto *t = local_scope->Var(var)->GetMutable<LoDTensor>();
          t->Resize(dims);
          buffer = t->mutable_data(place, main_tensor.type());
        }
        buffers.push_back(buffer);
      }

      PADDLE_ENFORCE_EQ(member_->places_.size(), buffers.size(),
                        "variables' buffer size to bcast NOT equal to places");
      {
        platform::NCCLGroupGuard guard;
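        // NCCLGroupGuard wraps ncclGroupStart()/ncclGroupEnd(), so the
        // per-device ncclBcast calls below are issued as one grouped
        // collective.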
        for (size_t i = 0; i < member_->places_.size(); ++i) {
          auto &nccl_ctx = member_->nccl_ctxs_->at(member_->places_[i]);
          platform::dynload::ncclBcast(buffers[i], numel, data_type, 0,
                                       nccl_ctx.comm_, nccl_ctx.stream());
        }
        member_->nccl_ctxs_->WaitAll();
      }
#else
      PADDLE_THROW("Not compiled with CUDA");
#endif
    } else {
      platform::CPUPlace cpu;
      for (size_t i = 0; i < member_->places_.size(); ++i) {
        if (i == 0) continue;

        auto local_scope = member_->local_scopes_[i];
        auto *t = local_scope->Var(var)->GetMutable<LoDTensor>();

        // FIXME(zcd): LR_DECAY_COUNTER should not be shared. This is a hot fix.
        if (member_->use_all_reduce_ || member_->use_cuda_ ||
            var == "@LR_DECAY_COUNTER@") {
          t->Resize(dims);
          t->mutable_data(cpu, main_tensor.type());
          paddle::framework::TensorCopy(main_tensor, cpu, t);
        } else {
          t->ShareDataWith(main_tensor);
        }
      }
    }
  }
}

void ParallelExecutor::Run(const std::vector<std::string> &fetch_tensors,
                           const std::string &fetched_var_name) {
#ifdef WITH_GPERFTOOLS
  if (gProfileStarted) {
    ProfilerFlush();
  }
#endif

  platform::RecordBlock b(0);
#ifdef PADDLE_WITH_CUDA
  if (!gcs_.empty()) {
    ResetReferenceCount();
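    // Fetched variables must outlive this Run() call, so drop them from the
    // reference-count maps to keep the garbage collector from freeing them.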
    for (auto &pair : cur_ref_cnts_) {
      auto &name_map = *(pair.second);
      for (auto &fetch_name : fetch_tensors) {
        name_map.erase(fetch_name);
      }
      name_map.erase(fetched_var_name);
    }
  }
#endif
  auto fetch_data = member_->executor_->Run(fetch_tensors);
  *member_->global_scope_->Var(fetched_var_name)->GetMutable<FeedFetchList>() =
      fetch_data;
}
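
// A typical single-step fetch (illustrative names): the fetched tensors are
// written into the global scope under fetched_var_name.
//
//   pe.Run({"loss"}, "fetch_list");
//   auto &fetched =
//       global_scope.FindVar("fetch_list")->Get<FeedFetchList>();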

void ParallelExecutor::FeedTensorsIntoLocalScopes(
    const std::vector<std::unordered_map<std::string, LoDTensor>> &tensors) {
  PADDLE_ENFORCE_EQ(member_->local_scopes_.size(), tensors.size());

  for (size_t i = 0; i < tensors.size(); ++i) {
    auto &map = tensors[i];
    auto *scope = member_->local_scopes_[i];
    for (auto &pair : map) {
      auto *trg = scope->Var(pair.first)->GetMutable<LoDTensor>();
      trg->ShareDataWith(pair.second);
      trg->set_lod(pair.second.lod());
    }
  }
}
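
// SplitLoDTensor slices the batch along dim 0 (respecting LoD boundaries)
// into one piece per place; each local scope then aliases its piece via
// ShareDataWith, so no extra copy is made.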

void ParallelExecutor::FeedAndSplitTensorIntoLocalScopes(
    const std::unordered_map<std::string, LoDTensor> &tensors) {
  for (auto pair : tensors) {
    auto lod_tensors = pair.second.SplitLoDTensor(member_->places_);
    PADDLE_ENFORCE_EQ(
        member_->places_.size(), lod_tensors.size(),
        "The number of samples in the current batch is less than the number "
        "of devices; this is currently not allowed. (%d vs %d)",
        member_->places_.size(), lod_tensors.size());
    for (size_t j = 0; j < member_->places_.size(); ++j) {
      // TODO(panxy0718): Do I need to delete this var?
      auto t =
          member_->local_scopes_[j]->Var(pair.first)->GetMutable<LoDTensor>();
      t->ShareDataWith(lod_tensors[j]);
      t->set_lod(lod_tensors[j].lod());
    }
  }
}

ParallelExecutor::~ParallelExecutor() {
  for (auto &p : member_->places_) {
    platform::DeviceContextPool::Instance().Get(p)->Wait();
  }
  // member_ must be destructed before gcs_, since the destructor of
  // ReferenceCountOpHandle uses raw pointers into gcs_.
  member_.reset();
}

}  // namespace framework
}  // namespace paddle
#ifdef PADDLE_WITH_CUDA
USE_PASS(reference_count_pass);
#endif