//   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/fluid/framework/details/multi_devices_graph_builder.h"
#include <utility>
#include "paddle/fluid/framework/details/broadcast_op_handle.h"
#include "paddle/fluid/framework/details/computation_op_handle.h"
#include "paddle/fluid/framework/details/reduce_op_handle.h"
#include "paddle/fluid/framework/details/scale_loss_grad_op_handle.h"
#include "paddle/fluid/framework/details/send_op_handle.h"
#include "paddle/fluid/framework/op_info.h"
#include "paddle/fluid/framework/scope.h"

#ifdef PADDLE_WITH_CUDA
#include "paddle/fluid/framework/details/nccl_all_reduce_op_handle.h"
#endif

#include <string>
#include <vector>

namespace paddle {
namespace framework {
namespace details {

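// Two build variants: with CUDA the builder also keeps the NCCL context map
// that the all-reduce / reduce / broadcast op handles use for cross-device
// communication; the CPU-only build omits it.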
#ifdef PADDLE_WITH_CUDA
MultiDevSSAGraphBuilder::MultiDevSSAGraphBuilder(
    const std::vector<platform::Place> &places,
    const std::string &loss_var_name,
    const std::unordered_set<std::string> &params,
    const std::vector<Scope *> &local_scopes,
    platform::NCCLContextMap *nccl_ctxs, const BuildStrategy &strategy)
    : loss_var_name_(loss_var_name),
      places_(places),
      local_scopes_(local_scopes),
      nccl_ctxs_(nccl_ctxs),
      strategy_(strategy) {
#else
MultiDevSSAGraphBuilder::MultiDevSSAGraphBuilder(
    const std::vector<platform::Place> &places,
    const std::string &loss_var_name,
    const std::unordered_set<std::string> &params,
    const std::vector<Scope *> &local_scopes, const BuildStrategy &strategy)
    : loss_var_name_(loss_var_name),
      places_(places),
      local_scopes_(local_scopes),
      strategy_(strategy) {
#endif
  for (auto &p : params) {
    grad_names_.insert(GradVarName(p));
  }
}

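// Wires up the op handle that was just appended to result->ops_: binds it to
// the device context of places_[place_id], then connects a var handle for
// each of the op's inputs and outputs on that place.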
void MultiDevSSAGraphBuilder::CreateOpHandleIOs(SSAGraph *result,
                                                const OpDesc &op,
                                                size_t place_id) const {
  auto p = places_[place_id];
  auto *op_handle = result->ops_.back().get();
  op_handle->SetDeviceContext(p,
                              platform::DeviceContextPool::Instance().Get(p));

  for (auto &each_var_name : op.InputArgumentNames()) {
    VarHandle *var =
        CreateOrGetLatestVarHandle(result, each_var_name, p, place_id);
    op_handle->AddInput(var);
  }

  for (auto &each_var_name : op.OutputArgumentNames()) {
    CreateOpOutput(result, op_handle, each_var_name, p, place_id);
  }
}

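// An op is part of the distributed-training glue if it is a "split" (or
// "split_byref") whose outputs feed the send op, or a "concat" whose inputs
// come from the send op's outputs. Parameter blocks carry a ".block" suffix,
// which is what the checker below matches on.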
bool MultiDevSSAGraphBuilder::IsDistTrainOp(const OpDesc &op,
                                            OpDesc *send_op) const {
  if (send_op == nullptr) {
    return false;
  }

  /**
   * Check whether any of opvars contains `.block` and appears in sendvars.
   */
  auto checker = [](const std::vector<std::string> &opvars,
                    const std::vector<std::string> &sendvars) -> bool {
    for (auto &var : opvars) {
      if (var.find(".block") != std::string::npos &&
          std::find(sendvars.begin(), sendvars.end(), var) != sendvars.end()) {
        return true;
      }
    }
    return false;
  };

  if (op.Type() == "split" || op.Type() == "split_byref") {
    return checker(op.OutputArgumentNames(), send_op->InputArgumentNames());
  } else if (op.Type() == "concat") {
    return checker(op.InputArgumentNames(), send_op->OutputArgumentNames());
  }
  return false;
}

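// Builds the SSA graph for the whole program: computation ops are replicated
// on every place (or pinned to a single place under the kReduce strategy),
// gradients are aggregated according to strategy_.reduce_, and the auxiliary
// scale-loss-grad / send / broadcast ops are inserted where needed.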
std::unique_ptr<SSAGraph> MultiDevSSAGraphBuilder::Build(
    const ProgramDesc &program) const {
  std::unordered_map<std::string, proto::VarType::Type> var_types;
  for (auto *var : program.Block(0).AllVars()) {
    var_types[var->Name()] = var->GetType();
  }

  auto graph = new SSAGraph();
  SSAGraph &result = *graph;
  std::unordered_set<std::string> og_has_been_broadcast;

  // We cannot invoke resize; doing so triggers a bug in GCC 4.8.
  result.vars_ = std::vector<
      std::unordered_map<std::string, std::vector<std::unique_ptr<VarHandle>>>>(
      places_.size());

  // Find the "send" op first, since the "split" ops appear before it in the
  // program order.
  OpDesc *send_op = GetSendOpDesc(program);

  size_t cur_device_id = 0;
  std::vector<std::unordered_set<std::string>> var_name_on_devices;
  std::vector<std::unordered_set<std::string>> bcast_var_name_set;
  var_name_on_devices.resize(places_.size());
  bcast_var_name_set.resize(places_.size());

  bool is_forwarding = true;
  for (auto *op : program.Block(0).AllOps()) {
    if (op->Type() == "send") {
      // Append the send op if the program is a distributed trainer main
      // program. It always uses the first device.
      CreateSendOp(&result, *op);
    } else if (IsDistTrainOp(*op, send_op)) {
      CreateComputationalOps(&result, *op, 1);
    } else if (IsScaleLossOp(*op)) {
      // The user can customize loss@grad when gradient_scale_ is kCustomized.
      if (strategy_.gradient_scale_ !=
          BuildStrategy::GradientScaleStrategy::kCustomized) {
        CreateScaleLossGradOp(&result);
      }
      is_forwarding = false;
    } else {
      int op_dev_id = GetOpDeviceID(var_name_on_devices, *op);
      if (op_dev_id == -1) {  // vars are on all devices
        CreateComputationalOps(&result, *op, places_.size());
      } else {
        CreateComputationalOp(&result, *op, op_dev_id);
        for (auto &var_name : op->OutputArgumentNames()) {
          var_name_on_devices[op_dev_id].emplace(var_name);
        }
      }
      if (!is_forwarding && places_.size() > 1) {
        // Currently, we assume that once a gradient is generated, it can be
        // broadcast, and each gradient is only broadcast once.
        if (static_cast<bool>(boost::get<int>(op->GetAttr(
                                  OpProtoAndCheckerMaker::OpRoleAttrName())) &
                              static_cast<int>(OpRole::kBackward))) {
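          // The OpRoleVar attribute flattens (parameter, gradient) name pairs
          // into [p0, g0, p1, g1, ...], hence the stride-2 loop below.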
          try {
            auto backward_vars =
                boost::get<std::vector<std::string>>(op->GetNullableAttr(
                    OpProtoAndCheckerMaker::OpRoleVarAttrName()));

            PADDLE_ENFORCE_EQ(backward_vars.size() % 2, 0);

            for (size_t i = 0; i < backward_vars.size(); i += 2) {
              auto &p_name = backward_vars[i];
              auto &g_name = backward_vars[i + 1];
              VLOG(10) << "Bcast " << g_name << " for parameter " << p_name;

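              // kReduce aggregates each gradient on one device, chosen
              // round-robin, and broadcasts the parameter back afterwards.
              // kAllReduce all-reduces dense gradients directly; sparse
              // (SELECTED_ROWS) gradients cannot be all-reduced, so they fall
              // back to a reduce onto device 0 followed by a broadcast.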
              switch (strategy_.reduce_) {
                case BuildStrategy::ReduceStrategy::kReduce:
                  CreateReduceOp(&result, g_name, cur_device_id);
                  var_name_on_devices[cur_device_id].emplace(g_name);
                  bcast_var_name_set[cur_device_id].emplace(p_name);
                  cur_device_id = (cur_device_id + 1) % places_.size();
                  break;
                case BuildStrategy::ReduceStrategy::kAllReduce:
                  if (IsSparseGradient(var_types, g_name)) {
                    CreateReduceOp(&result, g_name, 0);
                    CreateBroadcastOp(&result, g_name, 0);
                  } else {
                    InsertNCCLAllReduceOp(&result, g_name);
                  }
                  break;
              }
            }
          } catch (const boost::bad_get &e) {
            // The op does not set the OpRoleVar attribute; nothing to do.
          }
        }
      }
    }
  }

  // Insert BCast Ops
  for (size_t dev_id = 0; dev_id < bcast_var_name_set.size(); ++dev_id) {
    auto &to_bcast_set = bcast_var_name_set[dev_id];
    for (auto &bcast_name : to_bcast_set) {
      CreateBroadcastOp(&result, bcast_name, dev_id);
    }
  }
  /*
    Dependency graph has been constructed. However, there are still data
    hazards that need to be handled.
   */
  PolishGraphToSupportDataHazards(&result);

  /*
   * Only variables should be the leaves of the graph.
   */
  AddOutputToLeafOps(&result);

  if (VLOG_IS_ON(10)) {
    std::ostringstream sout;
    PrintGraphviz(*graph, sout);
    VLOG(10) << sout.str();
  }

  return std::unique_ptr<SSAGraph>(graph);
}

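// A gradient is sparse when its variable in block 0 has type SELECTED_ROWS.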
bool MultiDevSSAGraphBuilder::IsSparseGradient(
    const std::unordered_map<std::string, proto::VarType::Type> &var_types,
    const std::string &og) const {
  PADDLE_ENFORCE(var_types.count(og) != 0);
  return var_types.at(og) == proto::VarType::SELECTED_ROWS;
}

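// Broadcasts the latest version of p_name from src_dev_id to every place,
// appending a fresh var handle on each of them.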
void MultiDevSSAGraphBuilder::CreateBroadcastOp(SSAGraph *result,
                                                const std::string &p_name,
                                                size_t src_dev_id) const {
#ifdef PADDLE_WITH_CUDA
  auto *op_handle = new BroadcastOpHandle(local_scopes_, places_, nccl_ctxs_);
#else
  auto *op_handle = new BroadcastOpHandle(local_scopes_, places_);
#endif

  result->ops_.emplace_back(op_handle);
  auto *in = result->vars_.at(src_dev_id).at(p_name).back().get();
  op_handle->AddInput(in);

  for (size_t i = 0; i < places_.size(); ++i) {
    auto &vars = result->vars_.at(i).at(p_name);
    auto &p = places_[i];
    auto *out_var = new VarHandle(vars.size(), i, p_name, p);
    vars.emplace_back(out_var);
    op_handle->AddOutput(out_var);
#ifndef PADDLE_WITH_CUDA
    op_handle->SetDeviceContext(p,
                                platform::DeviceContextPool::Instance().Get(p));
#endif
  }
}

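// Places a single ComputationOpHandle for op on device dev_id and wires up
// its inputs and outputs there.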
void MultiDevSSAGraphBuilder::CreateComputationalOp(SSAGraph *result,
                                                    const OpDesc &op,
                                                    int dev_id) const {
  result->ops_.emplace_back(
      new ComputationOpHandle(op, local_scopes_[dev_id], places_[dev_id]));
  CreateOpHandleIOs(result, op, dev_id);
}

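// Returns the "send" op of block 0, or nullptr if this program is not a
// distributed trainer main program.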
OpDesc *MultiDevSSAGraphBuilder::GetSendOpDesc(
    const ProgramDesc &program) const {
  for (auto *op : program.Block(0).AllOps()) {
    if (op->Type() == "send") {
      return op;
    }
  }
  return nullptr;
}
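// Inserts one NCCLAllReduceOpHandle that consumes the latest version of og
// on every place and produces a new version on each of them. CUDA only.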
void MultiDevSSAGraphBuilder::InsertNCCLAllReduceOp(
    SSAGraph *result, const std::string &og) const {
#ifdef PADDLE_WITH_CUDA
  result->ops_.emplace_back(
      new NCCLAllReduceOpHandle(local_scopes_, places_, *nccl_ctxs_));
  auto *op_handle = result->ops_.back().get();

  for (size_t i = 0; i < places_.size(); ++i) {
    auto &p = places_[i];
    auto &vars = result->vars_[i][og];
    PADDLE_ENFORCE(!vars.empty());
    auto &prev_grad = vars.back();
    op_handle->AddInput(prev_grad.get());

    auto var = new VarHandle(vars.size() - 1, i, og, p);
    vars.emplace_back(var);
    op_handle->AddOutput(var);
  }
#else
  PADDLE_THROW("Not implemented");
#endif
}

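// True exactly once per parameter gradient: og must be a known gradient name
// that has not been aggregated yet.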
bool MultiDevSSAGraphBuilder::IsParameterGradientOnce(
    const std::string &og,
    std::unordered_set<std::string> *og_has_been_broadcast) const {
  bool is_pg_once =
      grad_names_.count(og) != 0 && og_has_been_broadcast->count(og) == 0;
  if (is_pg_once) {
    // Mark the gradient as broadcast so it is only handled once.
    og_has_been_broadcast->insert(og);
  }
  return is_pg_once;
}

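// Under the kReduce strategy an op runs on the device that already owns one
// of its input variables; -1 means the op is replicated on every device
// (and is also the answer for any other strategy).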
int MultiDevSSAGraphBuilder::GetOpDeviceID(
    const std::vector<std::unordered_set<std::string>> &var_name_on_devices,
    const OpDesc &op) const {
  if (strategy_.reduce_ != BuildStrategy::ReduceStrategy::kReduce) {
    return -1;
  }

  int var_dev_id = -1;
  for (auto &var_name : op.InputArgumentNames()) {
    if (var_dev_id != -1) break;
    for (size_t i = 0; i < var_name_on_devices.size(); ++i) {
      if (var_name_on_devices[i].count(var_name)) {
        var_dev_id = static_cast<int>(i);
        break;
      }
    }
  }
  return var_dev_id;
}

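// Creates loss@GRAD on every place; the scale factor is derived from the
// device count (see the FIXME below).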
void MultiDevSSAGraphBuilder::CreateScaleLossGradOp(SSAGraph *result) const {
  for (size_t i = 0; i < places_.size(); ++i) {
// Insert ScaleCost OpHandle
#ifdef PADDLE_WITH_CUDA
    auto *communication_dev_ctx = nccl_ctxs_->DevCtx(places_[i]);
#else
    auto *communication_dev_ctx =
        platform::DeviceContextPool::Instance().Get(platform::CPUPlace());
#endif

    auto *op_handle =
        new ScaleLossGradOpHandle(local_scopes_.size(), local_scopes_[i],
                                  places_[i], communication_dev_ctx);
    result->ops_.emplace_back(op_handle);

    // FIXME: Currently ScaleLossGradOp only uses device_count as the scale
    // factor, so it does not depend on any other operators.
    // VarHandle *loss = GetVarHandle(loss_var_name, place);
    // loss->pending_ops_.emplace_back(op_handle);
    // op_handle->inputs_.emplace_back(loss);

    CreateOpOutput(result, op_handle, GradVarName(loss_var_name_), places_[i],
                   i);
  }
}

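// Replicates op on the first num_places devices, one ComputationOpHandle per
// local scope.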
void MultiDevSSAGraphBuilder::CreateComputationalOps(SSAGraph *result,
                                                     const OpDesc &op,
                                                     size_t num_places) const {
  for (size_t scope_idx = 0; scope_idx < num_places; ++scope_idx) {
    auto p = places_[scope_idx];
    auto s = local_scopes_[scope_idx];
    result->ops_.emplace_back(new ComputationOpHandle(op, s, p));
    CreateOpHandleIOs(result, op, scope_idx);
  }
}

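// Gathers the latest version of og from every place and reduces them into a
// new var handle on dst_dev_id; returns that output handle.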
VarHandle *MultiDevSSAGraphBuilder::CreateReduceOp(SSAGraph *result,
                                                   const std::string &og,
                                                   int dst_dev_id) const {
#ifdef PADDLE_WITH_CUDA
  result->ops_.emplace_back(
      new ReduceOpHandle(local_scopes_, places_, nccl_ctxs_));
#else
  result->ops_.emplace_back(new ReduceOpHandle(local_scopes_, places_));
#endif
  auto *op_handle = result->ops_.back().get();

  for (size_t i = 0; i < places_.size(); ++i) {
    auto &vars = result->vars_[i][og];
#ifndef PADDLE_WITH_CUDA
    auto &p = places_[i];
    op_handle->SetDeviceContext(p,
                                platform::DeviceContextPool::Instance().Get(p));
#endif
    PADDLE_ENFORCE(!vars.empty());
    auto &prev_grad = vars.back();
    op_handle->AddInput(prev_grad.get());
  }
  auto &vars = result->vars_[dst_dev_id][og];
  auto var =
      new VarHandle(vars.size() - 1, dst_dev_id, og, places_[dst_dev_id]);
  vars.emplace_back(var);
  op_handle->AddOutput(var);
  return var;
}

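// The send op runs on the first place only; it consumes var handles but
// produces no SSA output.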
void MultiDevSSAGraphBuilder::CreateSendOp(SSAGraph *result,
                                           const OpDesc &op) const {
  auto &p = places_[0];
  auto *s = local_scopes_[0];
  // FIXME(wuyi): the send op always copies from GPU 0.
  result->ops_.emplace_back(new SendOpHandle(op, s, p));
  // Inputs are created on the original place, and no SSA output is
  // created for the send op.
  CreateOpHandleIOs(result, op, 0);
}

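// The scale-loss op is the one tagged with both the kBackward and kLoss
// roles; in test mode (empty loss_var_name_) there is nothing to scale.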
bool MultiDevSSAGraphBuilder::IsScaleLossOp(const OpDesc &op) const {
  return boost::get<int>(
             op.GetAttr(OpProtoAndCheckerMaker::OpRoleAttrName())) ==
             (static_cast<int>(OpRole::kBackward) |
              static_cast<int>(OpRole::kLoss)) &&
         !loss_var_name_.empty();  // If loss_var is empty, this is test mode.
}
}  // namespace details
}  // namespace framework
}  // namespace paddle