/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/framework/parallel_executor.h"

#include <string>
#include <tuple>
#include <vector>

#ifdef PADDLE_WITH_CUDA
#include "paddle/fluid/platform/nccl_helper.h"
#endif

#include "paddle/fluid/framework/details/multi_devices_graph_builder.h"
#include "paddle/fluid/framework/details/threaded_ssa_graph_executor.h"
#include "paddle/fluid/platform/profiler.h"

namespace paddle {
namespace framework {

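// Internal state of ParallelExecutor: the target places, the per-device
// scopes, the SSA graph executor and, when compiled with CUDA, the NCCL
// communication contexts.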
class ParallelExecutorPrivate {
 public:
  explicit ParallelExecutorPrivate(const std::vector<platform::Place> &places)
      : places_(places) {}

  std::vector<platform::Place> places_;
  std::vector<Scope *> local_scopes_;
  Scope *global_scope_;
  std::unique_ptr<details::SSAGraphExecutor> executor_;

#ifdef PADDLE_WITH_CUDA
  std::unique_ptr<platform::NCCLContextMap> nccl_ctxs_;
#endif

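  // (name, type, persistable) of every variable in block 0 of the main
  // program; used to create missing variables in each local scope before a
  // run.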
  std::vector<std::tuple<std::string, proto::VarType::Type, bool>> var_types_;
};

std::vector<Scope *> &ParallelExecutor::GetLocalScopes() {
  return member_->local_scopes_;
}

ParallelExecutor::ParallelExecutor(
    size_t num_threads, bool use_event,
    const std::vector<platform::Place> &places,
    const std::unordered_set<std::string> &params,
    const std::unordered_set<std::string> &bcast_vars,
    const ProgramDesc &main_program, const std::string &loss_var_name,
    Scope *scope, const std::vector<Scope *> &local_scopes, bool allow_op_delay)
    : member_(new ParallelExecutorPrivate(places)) {
  member_->global_scope_ = scope;

  // Step 1. Bcast the params to devs.
  // Create local scopes
  if (local_scopes.empty()) {
    for (size_t i = 0; i < member_->places_.size(); ++i) {
      member_->local_scopes_.push_back(&scope->NewScope());
    }
  } else {
    PADDLE_ENFORCE_EQ(member_->places_.size(), local_scopes.size());
    for (size_t i = 0; i < member_->places_.size(); ++i) {
      member_->local_scopes_.push_back(local_scopes[i]);
    }
  }

// Bcast Parameters to all GPUs
#ifdef PADDLE_WITH_CUDA
  member_->nccl_ctxs_.reset(new platform::NCCLContextMap(member_->places_));
#endif
  if (platform::is_gpu_place(places[0]) && member_->local_scopes_.size() != 1 &&
      local_scopes.empty()) {  // Is CUDA
    BCastParamsToGPUs(bcast_vars);
  }
// The startup program has been run. All local scopes have correct parameters.

// Step 2. Convert main_program to SSA form and dependency graph. Also, insert
// ncclOp
#ifdef PADDLE_WITH_CUDA
  details::MultiDevSSAGraphBuilder builder(member_->places_, loss_var_name,
                                           params, member_->local_scopes_,
                                           member_->nccl_ctxs_.get());
#else
  details::MultiDevSSAGraphBuilder builder(member_->places_, loss_var_name,
                                           params, member_->local_scopes_);
#endif
  auto graph = builder.Build(main_program);

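  // The ThreadedSSAGraphExecutor schedules ops of the SSA graph from a thread
  // pool as their dependencies become ready.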
  member_->executor_.reset(new details::ThreadedSSAGraphExecutor(
      num_threads, use_event, member_->local_scopes_, places, std::move(graph),
      allow_op_delay));

  // Step 3. Create vars in each scope;
  for (auto *var : main_program.Block(0).AllVars()) {
    member_->var_types_.emplace_back(var->Name(), var->GetType(),
                                     var->Persistable());
  }
}

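// Broadcasts every parameter listed in `vars` from the scope of the first
// place to all other local scopes: GPU tensors are broadcast with ncclBcast
// from device 0, CPU tensors are copied. Only available in CUDA builds.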
void ParallelExecutor::BCastParamsToGPUs(
    const std::unordered_set<std::string> &vars) const {
#ifdef PADDLE_WITH_CUDA
  auto *main_scope = member_->local_scopes_[0];

  for (auto &var : vars) {
    auto *main_var = main_scope->FindVar(var);
    if (main_var == nullptr || !main_var->IsType<LoDTensor>()) {
      continue;
    }

    auto &main_tensor = main_var->Get<LoDTensor>();
    auto &dims = main_tensor.dims();
    if (paddle::platform::is_gpu_place(main_tensor.place())) {
      size_t numel = main_tensor.numel();
      ncclDataType_t data_type = platform::ToNCCLDataType(main_tensor.type());
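      // Group the per-device broadcasts so NCCL issues them as one collective.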
      platform::NCCLGroupGuard guard;
      for (size_t i = 0; i < member_->places_.size(); ++i) {
        auto place = member_->places_[i];
        void *buffer;
        if (i == 0) {
          buffer = const_cast<void *>(main_tensor.data<void>());
        } else {
          auto local_scope = member_->local_scopes_[i];
          auto *t = local_scope->Var(var)->GetMutable<LoDTensor>();
          t->Resize(dims);
          buffer = t->mutable_data(place, main_tensor.type());
        }
        auto &nccl_ctx = member_->nccl_ctxs_->at(place);
        platform::dynload::ncclBcast(buffer, numel, data_type, 0,
                                     nccl_ctx.comm_, nccl_ctx.stream());
      }
    } else {
      platform::CPUPlace cpu;
      for (size_t i = 1; i < member_->places_.size(); ++i) {
        auto local_scope = member_->local_scopes_[i];
        auto *t = local_scope->Var(var)->GetMutable<LoDTensor>();
        t->Resize(dims);
        t->mutable_data(cpu, main_tensor.type());
        paddle::framework::TensorCopy(main_tensor, cpu, t);
      }
    }
    member_->nccl_ctxs_->WaitAll();
  }
#else
  PADDLE_THROW("Not compiled with CUDA");
#endif
}

void ParallelExecutor::Run(
    const std::vector<std::string> &fetch_tensors,
    const std::string &fetched_var_name,
    const std::unordered_map<std::string, LoDTensor> &feed_tensors) {
  platform::RecordBlock b(0);
  SplitTensorToPlaces(feed_tensors);

  // Create local scopes.
  for (auto &scope : member_->local_scopes_) {
    Scope &local_scope = scope->NewScope();
    *scope->Var(details::kLocalExecScopeName)->GetMutable<Scope *>() =
        &local_scope;

    for (auto &name_type_pair : member_->var_types_) {
      if (scope->FindVar(std::get<0>(name_type_pair)) != nullptr) {
        continue;
      }

      if (std::get<2>(name_type_pair)) {  // Persistable
        InitializeVariable(scope->Var(std::get<0>(name_type_pair)),
                           std::get<1>(name_type_pair));
      } else {
        InitializeVariable(local_scope.Var(std::get<0>(name_type_pair)),
                           std::get<1>(name_type_pair));
      }
    }
  }

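  // Run the SSA graph and expose the fetched tensors through the global scope.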
  auto fetch_data = member_->executor_->Run(fetch_tensors);
  *member_->global_scope_->Var(fetched_var_name)->GetMutable<FeedFetchList>() =
      fetch_data;

  // Wait All computational streams
  for (auto p : member_->places_) {
    platform::DeviceContextPool::Instance().Get(p)->Wait();
  }
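  // Drop the per-iteration local scopes now that this step has finished.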
  for (auto &scope : member_->local_scopes_) {
    auto &local_scope =
        *scope->Var(details::kLocalExecScopeName)->GetMutable<Scope *>();
    scope->DeleteScope(local_scope);
    local_scope = nullptr;
  }
}

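// Splits every feed LoDTensor along its batch (LoD) dimension and shares one
// slice with the matching variable in each device's local scope.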
void ParallelExecutor::SplitTensorToPlaces(
    const std::unordered_map<std::string, LoDTensor> &feed_tensors) {
  for (auto it : feed_tensors) {
    auto lod_tensors = it.second.SplitLoDTensor(member_->places_);
    PADDLE_ENFORCE_EQ(
        member_->places_.size(), lod_tensors.size(),
        "The number of samples of current batch is less than the count of "
        "devices, currently, it is not allowed. (%d vs %d)",
        member_->places_.size(), lod_tensors.size());
    for (size_t j = 0; j < member_->places_.size(); ++j) {
      // TODO(panxy0718): Do I need to delete this var?
      auto t =
          member_->local_scopes_[j]->Var(it.first)->GetMutable<LoDTensor>();
      t->ShareDataWith(lod_tensors[j]);
      t->set_lod(lod_tensors[j].lod());
    }
  }
}

}  // namespace framework
}  // namespace paddle