/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/framework/parallel_executor.h"

#include <string>
#include <tuple>
#include <vector>

#ifdef PADDLE_WITH_CUDA
#include "paddle/fluid/platform/nccl_helper.h"
#endif

#include "paddle/fluid/framework/details/scope_buffered_ssa_graph_executor.h"
#include "paddle/fluid/framework/details/ssa_graph_builder_factory.h"
#include "paddle/fluid/framework/details/threaded_ssa_graph_executor.h"
#include "paddle/fluid/platform/profiler.h"

namespace paddle {
namespace framework {

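// Holds the internal state of a ParallelExecutor: the devices it runs on,
// the per-device local scopes, the underlying SSA graph executor and, when
// built with CUDA, the NCCL communication contexts.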
class ParallelExecutorPrivate {
 public:
  explicit ParallelExecutorPrivate(const std::vector<platform::Place> &places)
      : places_(places) {}

  std::vector<platform::Place> places_;
  std::vector<Scope *> local_scopes_;
  Scope *global_scope_;
  std::unique_ptr<details::SSAGraphExecutor> executor_;

#ifdef PADDLE_WITH_CUDA
  std::unique_ptr<platform::NCCLContextMap> nccl_ctxs_;
#endif
  bool own_local_scope_;
  bool use_cuda_;
  bool use_all_reduce_;
};

std::vector<Scope *> &ParallelExecutor::GetLocalScopes() {
  return member_->local_scopes_;
}

ParallelExecutor::ParallelExecutor(
    const std::vector<platform::Place> &places,
    const std::unordered_set<std::string> &params,
    const std::unordered_set<std::string> &bcast_vars,
    const ProgramDesc &main_program, const std::string &loss_var_name,
    Scope *scope, const std::vector<Scope *> &local_scopes,
    const ExecutionStrategy &exec_strategy, const BuildStrategy &build_strategy,
    size_t num_trainers, size_t trainer_id)
    : member_(new ParallelExecutorPrivate(places)) {
  member_->global_scope_ = scope;
  member_->use_cuda_ = exec_strategy.use_cuda_;
  member_->use_all_reduce_ =
      build_strategy.reduce_ == BuildStrategy::ReduceStrategy::kAllReduce;

  if (!member_->use_all_reduce_) {
    PADDLE_ENFORCE(places.size() > 1,
                   "If you set build_strategy.reduce with 'Reduce',"
                   "the number of places must be greater than 1.");
  }

  // Step 1. Bcast the params to devices.
  // Create local scopes
  if (local_scopes.empty()) {
    member_->own_local_scope_ = true;
    member_->local_scopes_.emplace_back(member_->global_scope_);
    for (size_t i = 1; i < member_->places_.size(); ++i) {
      member_->local_scopes_.emplace_back(&scope->NewScope());
    }
  } else {
    member_->own_local_scope_ = false;
    PADDLE_ENFORCE_EQ(member_->places_.size(), local_scopes.size());
    for (size_t i = 0; i < member_->places_.size(); ++i) {
      member_->local_scopes_.emplace_back(&local_scopes[i]->NewScope());
    }
  }

  if (member_->use_cuda_) {
// Bcast Parameters to all GPUs
#ifdef PADDLE_WITH_CUDA
    auto *nccl_id_var = scope->FindVar(NCCL_ID_VARNAME);
    ncclUniqueId *nccl_id = nullptr;
    if (nccl_id_var != nullptr) {
      nccl_id = nccl_id_var->GetMutable<ncclUniqueId>();
    }
    member_->nccl_ctxs_.reset(new platform::NCCLContextMap(
        member_->places_, nccl_id, num_trainers, trainer_id));
#else
    PADDLE_THROW("Not compiled with CUDA");
#endif
  }

  if (member_->local_scopes_.size() != 1 && local_scopes.empty()) {
    BCastParamsToDevices(bcast_vars);
  }
  // Startup Program has been run. All local scopes have correct parameters.

  // Step 2. Create vars in each scope;
  std::vector<details::VariableInfo> var_infos;
  for (auto *var : main_program.Block(0).AllVars()) {
    var_infos.emplace_back();
    var_infos.back().name_ = var->Name();
    var_infos.back().type_ = var->GetType();
    var_infos.back().persistable_ = var->Persistable();
  }

  // Step 3. Convert main_program to SSA form and dependency graph. Also, insert
  // ncclOp
  details::SSAGraphBuilderFactory builder_factory(
      member_->places_, loss_var_name, params, member_->local_scopes_,
      build_strategy);
  if (member_->use_cuda_) {
#ifdef PADDLE_WITH_CUDA
    builder_factory.SetNCCLContextMap(member_->nccl_ctxs_.get());
#else
    PADDLE_THROW("Not compiled with CUDA.");
#endif
  }

  builder_ = builder_factory.Create();
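  // Build the SSA graph from main_program and create the executor that runs
  // it over the local scopes with a pool of threads.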
  member_->executor_.reset(new details::ThreadedSSAGraphExecutor(
      exec_strategy, member_->local_scopes_, places,
      builder_->Build(main_program)));

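  // Wrap the threaded executor so that per-iteration local variables are
  // created in buffered scopes and cleaned up between iterations.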
  member_->executor_.reset(new details::ScopeBufferedSSAGraphExecutor(
      exec_strategy, member_->local_scopes_, std::move(var_infos),
      member_->places_, std::move(member_->executor_)));
}

void ParallelExecutor::BCastParamsToDevices(
    const std::unordered_set<std::string> &vars) const {
  // For the initializing bcast, all vars are bcast from device(0);
  // otherwise, bcast from the specified device.
  bool initializing = builder_.get() == nullptr;

  for (auto &var : vars) {
    int var_dev_id =
        builder_.get() == nullptr ? -1 : builder_->GetVarDeviceID(var);
    if (!initializing && var_dev_id == -1) continue;

    framework::Variable *main_var = nullptr;
    if (initializing) {
      main_var = member_->local_scopes_[0]->FindVar(var);
    } else {
      main_var = member_->local_scopes_[var_dev_id]->FindVar(var);
    }

    if (main_var == nullptr || !main_var->IsType<LoDTensor>()) {
      continue;
    }

    auto &main_tensor = main_var->Get<LoDTensor>();
    auto &dims = main_tensor.dims();
    if (paddle::platform::is_gpu_place(main_tensor.place())) {
#ifdef PADDLE_WITH_CUDA
      std::vector<void *> buffers;
      size_t numel = main_tensor.numel();
      ncclDataType_t data_type = platform::ToNCCLDataType(main_tensor.type());
      for (size_t i = 0; i < member_->places_.size(); ++i) {
        auto place = member_->places_[i];
        void *buffer;

        if ((initializing && i == 0) ||
            (!initializing && static_cast<int>(i) == var_dev_id)) {
          buffer = const_cast<void *>(main_tensor.data<void>());
        } else {
          auto local_scope = member_->local_scopes_[i];
          auto *t = local_scope->Var(var)->GetMutable<LoDTensor>();
          t->Resize(dims);
          buffer = t->mutable_data(place, main_tensor.type());
        }
        buffers.push_back(buffer);
      }

      PADDLE_ENFORCE_EQ(member_->places_.size(), buffers.size(),
                        "The number of buffers to bcast is not equal to the "
                        "number of places.");
      {
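        // Issue all per-device ncclBcast calls inside a single NCCL group,
        // then wait for every stream to finish.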
        platform::NCCLGroupGuard guard;
        for (size_t i = 0; i < member_->places_.size(); ++i) {
          auto &nccl_ctx = member_->nccl_ctxs_->at(member_->places_[i]);
          if (initializing) {
            platform::dynload::ncclBcast(buffers[i], numel, data_type, 0,
                                         nccl_ctx.comm_, nccl_ctx.stream());
          } else {
            if (var_dev_id >= 0) {
              platform::dynload::ncclBcast(buffers[i], numel, data_type,
                                           var_dev_id, nccl_ctx.comm_,
                                           nccl_ctx.stream());
            }
          }
        }
        member_->nccl_ctxs_->WaitAll();
      }

#else
      PADDLE_THROW("Not compiled with CUDA");
#endif
    } else {
      platform::CPUPlace cpu;
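      // CPU path: every device other than the source copies (or shares) the
      // tensor from the source scope.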
      for (size_t i = 0; i < member_->places_.size(); ++i) {
        if ((initializing && i == 0) ||
            (!initializing && static_cast<int>(i) == var_dev_id))
          continue;

        auto local_scope = member_->local_scopes_[i];
        auto *t = local_scope->Var(var)->GetMutable<LoDTensor>();

        // FIXME(zcd): LR_DECAY_COUNTER should not be shared. This is a hot fix.
        if (member_->use_all_reduce_ || member_->use_cuda_ ||
            var == "@LR_DECAY_COUNTER@") {
          t->Resize(dims);
          t->mutable_data(cpu, main_tensor.type());
          paddle::framework::TensorCopy(main_tensor, cpu, t);
        } else {
          t->ShareDataWith(main_tensor);
        }
      }
    }
  }
}

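// Runs one iteration over all devices and stores the fetched results in
// `fetched_var_name` inside the global scope.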
void ParallelExecutor::Run(const std::vector<std::string> &fetch_tensors,
                           const std::string &fetched_var_name) {
  platform::RecordBlock b(0);
  auto fetch_data = member_->executor_->Run(fetch_tensors);
  *member_->global_scope_->Var(fetched_var_name)->GetMutable<FeedFetchList>() =
      fetch_data;
}

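// Feeds one tensor map per device; each tensor is shared directly into the
// corresponding local scope.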
void ParallelExecutor::FeedTensorsIntoLocalScopes(
    const std::vector<std::unordered_map<std::string, LoDTensor>> &tensors) {
  PADDLE_ENFORCE_EQ(member_->local_scopes_.size(), tensors.size());

  for (size_t i = 0; i < tensors.size(); ++i) {
    auto &map = tensors[i];
    auto *scope = member_->local_scopes_[i];
    for (auto &pair : map) {
      auto *trg = scope->Var(pair.first)->GetMutable<LoDTensor>();
      trg->ShareDataWith(pair.second);
      trg->set_lod(pair.second.lod());
    }
  }
}

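// Feeds whole-batch tensors and splits each of them across the devices; the
// batch must contain at least as many samples as there are places.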
void ParallelExecutor::FeedAndSplitTensorIntoLocalScopes(
    const std::unordered_map<std::string, LoDTensor> &tensors) {
  for (auto pair : tensors) {
    auto lod_tensors = pair.second.SplitLoDTensor(member_->places_);
    PADDLE_ENFORCE_EQ(
        member_->places_.size(), lod_tensors.size(),
        "The number of samples in the current batch is less than the number "
        "of devices, which is currently not allowed. (%d vs %d)",
        member_->places_.size(), lod_tensors.size());
    for (size_t j = 0; j < member_->places_.size(); ++j) {
      // TODO(panxy0718): Do I need to delete this var?
      auto t =
          member_->local_scopes_[j]->Var(pair.first)->GetMutable<LoDTensor>();
      t->ShareDataWith(lod_tensors[j]);
      t->set_lod(lod_tensors[j].lod());
    }
  }
}

ParallelExecutor::~ParallelExecutor() {
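  // Release the local scopes this executor created itself; caller-provided
  // scopes are not touched here.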
  if (member_->own_local_scope_) {
    for (size_t i = 1; i < member_->local_scopes_.size(); ++i) {
      member_->global_scope_->DeleteScope(member_->local_scopes_[i]);
    }
  }
}

}  // namespace framework
}  // namespace paddle