//   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <algorithm>
#include <fstream>
#include <string>
#include <utility>
#include <vector>

#include "paddle/fluid/framework/details/all_reduce_op_handle.h"
#include "paddle/fluid/framework/details/broadcast_op_handle.h"
#include "paddle/fluid/framework/details/computation_op_handle.h"
#include "paddle/fluid/framework/details/data_balance_op_handle.h"
#include "paddle/fluid/framework/details/multi_devices_graph_pass.h"
#include "paddle/fluid/framework/details/reduce_op_handle.h"
#include "paddle/fluid/framework/details/rpc_op_handle.h"
#include "paddle/fluid/framework/details/scale_loss_grad_op_handle.h"
#include "paddle/fluid/framework/ir/graph_helper.h"
#include "paddle/fluid/framework/ir/node.h"
#include "paddle/fluid/framework/op_info.h"
#include "paddle/fluid/framework/scope.h"

namespace paddle {
namespace framework {
namespace details {
namespace {
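// Add explicit write-after-read dependencies: for every variable with more
// than one version on a place, connect the ops that read an older version to
// the op that writes the next version through a dummy control-dependency var,
// so the writer cannot run before all of its readers have finished.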
void PolishGraphToSupportDataHazards(ir::Graph *graph) {
  for (auto &var_map : graph->Get<GraphVars>(kGraphVars)) {
    for (auto &name_pair : var_map) {
      if (name_pair.second.size() <= 1) {
        continue;
      }
      auto it_new = name_pair.second.rbegin();
      auto it_old = name_pair.second.rbegin();
      ++it_old;
      for (; it_old != name_pair.second.rend(); it_new = it_old, ++it_old) {
        OpHandleBase *write_op = (*it_new)->GeneratedOp();
        const auto &read_ops = (*it_old)->PendingOps();

        for (auto *read_op : read_ops) {
          // Manually add a dependency var from read_op to write_op;
          if (read_op == write_op) {
            // Read Write is the same op.
            continue;
          }
          bool has_dep = false;
          for (auto *r_out : read_op->Outputs()) {
            for (auto *w_in : write_op->Inputs()) {
              if (r_out->Node() == w_in->Node()) {
                has_dep = true;
                break;
              }
            }
          }
          if (has_dep) continue;

          auto *dep_var = new DummyVarHandle(graph->CreateControlDepVar());
          read_op->AddOutput(dep_var);
          write_op->AddInput(dep_var);
          graph->Get<GraphDepVars>(kGraphDepVars).emplace(dep_var);
        }
      }
    }
  }
}

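// Return the latest version of the variable handle for `node` on `place`,
// creating version 0 first if the variable has not been seen on that place.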
VarHandle *CreateOrGetLatestVarHandle(ir::Graph *graph, ir::Node *node,
                                      const platform::Place &place,
                                      size_t place_offset) {
  auto &var_holders = graph->Get<GraphVars>(kGraphVars)[place_offset];
  auto &var_holder = var_holders[node->Name()];
  VarHandle *var = nullptr;
  if (var_holder.empty()) {
    if (node->Var()) {
      var = new VarHandle(graph->CreateVarNode(node->Var()), 0, place_offset,
                          node->Name(), place);
    } else {
      var = new VarHandle(
          graph->CreateEmptyNode(node->Name(), ir::Node::Type::kVariable), 0,
          place_offset, node->Name(), place);
    }
    var_holder.emplace_back(var);
  } else {
    var = var_holder.rbegin()->get();
  }
  return var;
}

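// Create a new version of the output variable on `place` and register it as
// an output of `op_handle`.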
void CreateOpOutput(ir::Graph *graph, OpHandleBase *op_handle,
                    ir::Node *new_node, const platform::Place &place,
                    size_t place_offset) {
  auto &vars =
      graph->Get<GraphVars>(kGraphVars)[place_offset][new_node->Name()];
  size_t version = vars.size();
  auto var =
      new VarHandle(new_node, version, place_offset, new_node->Name(), place);
  vars.emplace_back(var);
  op_handle->AddOutput(var);
}

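// Give every leaf op a dummy output so that ops are never the leaves of the
// dependency graph; only variables should be.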
void AddOutputToLeafOps(ir::Graph *graph) {
  for (auto &op : graph->Get<GraphOps>(kGraphOps)) {
    if (!op->Outputs().empty()) {
      continue;
    }
    auto *dummy_leaf = new DummyVarHandle(graph->CreateControlDepVar());
    graph->Get<GraphDepVars>(kGraphDepVars).emplace(dummy_leaf);
    op->AddOutput(dummy_leaf);
  }
}
}  // namespace

static const char kLossVarName[] = "loss_var_name";
static const char kPlaces[] = "places";
static const char kParams[] = "params";
static const char kLocalScopes[] = "local_scopes";
static const char kStrategy[] = "strategy";

void MultiDevSSAGraphBuilder::Init() const {
  loss_var_name_ = Get<const std::string>(kLossVarName);
  places_ = Get<const std::vector<platform::Place>>(kPlaces);
  local_scopes_ = Get<const std::vector<Scope *>>(kLocalScopes);
  strategy_ = Get<const BuildStrategy>(kStrategy);
#ifdef PADDLE_WITH_CUDA
  nccl_ctxs_ = &Get<platform::NCCLContextMap>("nccl_ctxs");
#endif

  for (auto &p : Get<const std::unordered_set<std::string>>(kParams)) {
    grad_names_.insert(GradVarName(p));
  }
  balance_vars_.resize(places_.size(), 0);
  if (strategy_.enable_data_balance_ && places_.size() == 1) {
    LOG(WARNING) << "There is no need to enable data balance when there is "
                    "only one place. enable_data_balance is set to False.";
    strategy_.enable_data_balance_ = false;
  }
}

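// Wire the inputs and outputs of the most recently created op handle to
// variable handles on the `place_id`-th place.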
void MultiDevSSAGraphBuilder::CreateOpHandleIOs(ir::Graph *result,
                                                ir::Node *node,
                                                size_t place_id) const {
  auto p = places_[place_id];
  auto *op_handle = result->Get<GraphOps>(kGraphOps).back().get();
  op_handle->SetDeviceContext(p,
                              platform::DeviceContextPool::Instance().Get(p));

  for (ir::Node *input : node->inputs) {
    VarHandle *var = CreateOrGetLatestVarHandle(result, input, p, place_id);
    op_handle->AddInput(var);
  }

  for (ir::Node *output : node->outputs) {
    ir::Node *new_node = nullptr;
    if (output->Var()) {
      new_node = result->CreateVarNode(output->Var());
    } else {
      new_node =
          result->CreateEmptyNode(output->Name(), ir::Node::Type::kVariable);
    }
    CreateOpOutput(result, op_handle, new_node, p, place_id);
  }
}

std::vector<std::string> MultiDevSSAGraphBuilder::FindDistTrainSendVars(
    const std::vector<ir::Node *> &nodes) const {
  std::vector<std::string> send_vars;
  // Since parameters are all in block 0,
  // it's enough to only scan send ops in block 0.
  for (auto &node : nodes) {
    OpDesc *op = node->Op();
    // TODO(Yancey1989): use a graceful method to find send op,
    // instead of the hard-coded string
    if (op->Type() == "send") {
      auto op_vars = op->InputArgumentNames();
      send_vars.reserve(send_vars.size() +
                        std::distance(op_vars.begin(), op_vars.end()));
      send_vars.insert(send_vars.end(), op_vars.begin(), op_vars.end());
    }
  }
  return send_vars;
}

std::vector<std::string> MultiDevSSAGraphBuilder::FindDistTrainRecvVars(
    const std::vector<ir::Node *> &nodes) const {
  std::vector<std::string> recv_vars;
  for (auto &node : nodes) {
    OpDesc *op = node->Op();
    // TODO(Yancey1989): use a graceful method to find recv op,
    // instead of the hard-coded string
    if (op->Type() == "recv") {
      auto op_vars = op->OutputArgumentNames();
      recv_vars.reserve(recv_vars.size() +
                        std::distance(op_vars.begin(), op_vars.end()));
      recv_vars.insert(recv_vars.end(), op_vars.begin(), op_vars.end());
    }
  }
  return recv_vars;
}

bool MultiDevSSAGraphBuilder::IsDistTrainOp(
    ir::Node *node, const std::vector<std::string> &send_vars,
    const std::vector<std::string> &recv_vars) const {
  if (send_vars.size() == 0 || recv_vars.size() == 0) {
    return false;
  }

  /**
   * Check whether any of `opvars` contains `.block` and is also in `rpc_vars`.
   */
  auto checker = [](const std::vector<std::string> &opvars,
                    const std::vector<std::string> &rpc_vars) -> bool {
    for (auto &var : opvars) {
      // A variable name with the suffix `.block` means it is a variable
      // split by the DistributeTranspiler
      // [python/paddle/fluid/transpiler/distribute_transpiler.py].
      if (var.find(".block") != std::string::npos &&
          std::find(rpc_vars.begin(), rpc_vars.end(), var) != rpc_vars.end()) {
        return true;
      }
    }
    return false;
  };

  std::vector<std::string> input_var_names;
  std::vector<std::string> output_var_names;
  for (ir::Node *input : node->inputs) {
    input_var_names.push_back(input->Name());
  }
  for (ir::Node *output : node->outputs) {
    output_var_names.push_back(output->Name());
  }

  return checker(output_var_names, send_vars) ||
         checker(input_var_names, recv_vars);
}

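// Greedy load balancing: pick the device with the smallest accumulated
// number of elements so far and charge it with the total numel of `var_names`.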
size_t MultiDevSSAGraphBuilder::GetAppropriateDeviceID(
    const std::vector<std::string> &var_names) const {
  int64_t numel_sum = 0;
  for (auto var_name : var_names) {
    if (all_vars_.find(var_name) == all_vars_.end()) continue;
    auto var_desc = all_vars_.at(var_name);
    PADDLE_ENFORCE_NOT_NULL(var_desc);
    auto dim = framework::make_ddim(var_desc->GetShape());
    int64_t numel = framework::product(dim);
    PADDLE_ENFORCE_GT(numel, 0);
    numel_sum += numel;
  }

  auto smallest =
      std::min_element(std::begin(balance_vars_), std::end(balance_vars_));
  size_t dev_id =
      static_cast<size_t>(std::distance(std::begin(balance_vars_), smallest));
  balance_vars_[dev_id] += numel_sum;
  return dev_id;
}

// Topologically sort the graph nodes from inputs to outputs.
// Since SSAGraphBuilder depends on forward/backward nodes to assign devices
// to parameters/gradients before optimizer ops, a plain topological sort is
// insufficient (some optimizer ops might not depend on any node), so we
// manually move all optimizer nodes after the last backward node.
// However, this assumption made by SSAGraphBuilder should be relaxed in the
// future.
std::vector<ir::Node *> SortOpsAndDelayOptimizeOp(const ir::Graph &graph) {
  std::vector<ir::Node *> ret = ir::TopologySortOperations(graph);
  size_t last_backward = 0;
  for (size_t i = 0; i < ret.size(); ++i) {
    if (boost::get<int>(
            ret[i]->Op()->GetAttr(OpProtoAndCheckerMaker::OpRoleAttrName())) ==
        static_cast<int>(OpRole::kBackward)) {
      last_backward = i;
    }
  }

  std::vector<ir::Node *> optimize_ops;
  std::vector<ir::Node *> sorted_ret;
  for (size_t i = 0; i < ret.size(); ++i) {
    if (i < last_backward) {
      if (boost::get<int>(ret[i]->Op()->GetAttr(
              OpProtoAndCheckerMaker::OpRoleAttrName())) ==
          static_cast<int>(OpRole::kOptimize)) {
        optimize_ops.push_back(ret[i]);
      } else {
        sorted_ret.push_back(ret[i]);
      }
    } else if (i == last_backward) {
      sorted_ret.push_back(ret[i]);
      // Verify that no operation before the optimize ops depends on them.
      std::unordered_set<ir::Node *> optimize_set(optimize_ops.begin(),
                                                  optimize_ops.end());
      for (ir::Node *n : sorted_ret) {
        for (ir::Node *in : n->inputs) {
          for (ir::Node *pre_n : in->inputs) {
            PADDLE_ENFORCE(optimize_set.find(pre_n) == optimize_set.end(),
                           "optimize operations cannot be depended on by "
                           "forward or backward node %s -> %s",
                           pre_n->Name(), n->Name());
          }
        }
      }
      sorted_ret.insert(sorted_ret.end(), optimize_ops.begin(),
                        optimize_ops.end());
    } else {
      sorted_ret.push_back(ret[i]);
    }
  }
  return sorted_ret;
}

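// Build the multi-device SSA graph: topologically sort the ops (delaying
// optimizer ops past the last backward op), then place each op either on a
// single device (RPC ops, distributed-training ops, sharded ops under the
// kReduce strategy) or on every place, inserting reduce / all-reduce /
// broadcast op handles for parameter gradients along the way.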
std::unique_ptr<ir::Graph> MultiDevSSAGraphBuilder::ApplyImpl(
    std::unique_ptr<ir::Graph> graph) const {
  Init();
  // Give the topology sort order and rebuild the graph structure.
  std::vector<ir::Node *> sorted_ops = SortOpsAndDelayOptimizeOp(*graph);
  auto nodes = graph->ReleaseNodes();
  ir::Graph &result = *graph;

  for (auto &node : nodes) {
    if (node->NodeType() == ir::Node::Type::kVariable && node->Var()) {
      all_vars_.emplace(node->Name(), node->Var());
    }
  }
  std::unordered_set<std::string> og_has_been_broadcast;

  // We cannot invoke resize. It is a bug of GCC 4.8.
  result.Set(kGraphVars, new GraphVars(places_.size()));
  result.Set(kGraphDepVars, new GraphDepVars);
  result.Set(kGraphOps, new GraphOps);
  result.Set(kShardedVarDevice, new ShardedVarDevice);

  // Find send/recv vars so that we can place the distributed-training-related
  // ops on place 0.
  auto send_vars = FindDistTrainSendVars(sorted_ops);
  auto recv_vars = FindDistTrainRecvVars(sorted_ops);

  std::vector<std::unordered_set<std::string>> bcast_var_name_set;
  bcast_var_name_set.resize(places_.size());

  size_t cur_device_id = 0;
  bool is_forwarding = true;

  for (ir::Node *node : sorted_ops) {
    if (boost::get<int>(
            node->Op()->GetAttr(OpProtoAndCheckerMaker::OpRoleAttrName())) ==
        static_cast<int>(OpRole::kRPC)) {
      CreateRPCOp(&result, node);
    } else if (IsDistTrainOp(node, send_vars, recv_vars)) {
      CreateDistTrainOp(&result, node);
    } else if (IsScaleLossOp(node)) {
      // The user can customize loss@grad if the gradient scale strategy is
      // kCustomized.
      if (strategy_.gradient_scale_ !=
          BuildStrategy::GradientScaleStrategy::kCustomized) {
        // TODO(paddle-dev): Why is there no input for this op_handle?
        auto loss_grad_name = node->Op()->OutputArgumentNames()[0];
        CreateScaleLossGradOp(&result, loss_grad_name);
      }
      // This assumes the backward generating code will ensure IsScaleLossOp
      // is true only for the op that scales the final scalar loss.
      // It also assumes backward ops will always follow the forward ops in
      // the block.
      is_forwarding = false;
    } else {
      int op_dev_id = GetOpDeviceID(result, node);
      if (op_dev_id != -1) {  // This op only runs on one specific device.
        CreateComputationalOp(&result, node, op_dev_id);
        for (ir::Node *n : node->outputs) {
          graph->Get<ShardedVarDevice>(kShardedVarDevice)
              .emplace(n->Name(), op_dev_id);
        }
      } else {
        // This op runs on all devices, and its outputs may be parameter
        // gradients.
        // TODO(paddle-dev): Why is the "read" op so special?
        if (node->Op()->Type() == "read" && strategy_.enable_data_balance_) {
          node->Op()->SetAttr("throw_eof_exp", false);
          CreateComputationalOps(&result, node, places_.size());
          const auto &data_var_names = node->Op()->Output("Out");
          InsertDataBalanceOp(&result, data_var_names);
        } else {
          CreateComputationalOps(&result, node, places_.size());
        }

        if (!is_forwarding && places_.size() > 1) {
          // Currently, we assume that once a gradient is generated, it can be
          // broadcast, and each gradient is only broadcast once.
          if (static_cast<bool>(boost::get<int>(node->Op()->GetAttr(
                                    OpProtoAndCheckerMaker::OpRoleAttrName())) &
                                static_cast<int>(OpRole::kBackward))) {
            try {
              auto backward_vars = boost::get<std::vector<std::string>>(
                  node->Op()->GetNullableAttr(
                      OpProtoAndCheckerMaker::OpRoleVarAttrName()));

              PADDLE_ENFORCE_EQ(backward_vars.size() % 2, 0);

              for (size_t i = 0; i < backward_vars.size(); i += 2) {
                auto &p_name = backward_vars[i];
                auto &g_name = backward_vars[i + 1];
                VLOG(10) << "Bcast " << g_name << " for parameter " << p_name;

                switch (strategy_.reduce_) {
                  case BuildStrategy::ReduceStrategy::kReduce:
                    cur_device_id = GetAppropriateDeviceID({g_name});
                    CreateReduceOp(&result, g_name, cur_device_id);
                    graph->Get<ShardedVarDevice>(kShardedVarDevice)
                        .emplace(g_name, cur_device_id);
                    bcast_var_name_set[cur_device_id].emplace(p_name);
                    break;
                  case BuildStrategy::ReduceStrategy::kAllReduce:
                    if (IsSparseGradient(g_name)) {
                      CreateReduceOp(&result, g_name, 0);
                      CreateBroadcastOp(&result, g_name, 0);
                    } else {
                      InsertAllReduceOp(&result, g_name);
                    }
                    break;
                  default:
                    LOG(FATAL) << "Unknown reduce strategy.";
                    break;
                }
              }
            } catch (boost::bad_get e) {
            }
          }
        }
      }
    }
  }

  bool use_gpu = false;
#ifdef PADDLE_WITH_CUDA
  use_gpu = nccl_ctxs_ != nullptr;
#endif

  if (use_gpu ||
      strategy_.reduce_ == BuildStrategy::ReduceStrategy::kAllReduce) {
    // Insert broadcast ops.
    for (size_t dev_id = 0; dev_id < bcast_var_name_set.size(); ++dev_id) {
      auto &to_bcast_set = bcast_var_name_set[dev_id];
      for (auto &bcast_name : to_bcast_set) {
        CreateBroadcastOp(&result, bcast_name, dev_id);
      }
    }
  }
  /*
  The dependency graph has been constructed. However, there are still data
  hazards that need to be handled.
  */
  PolishGraphToSupportDataHazards(&result);

  /*
   * Only variables should be the leaves of the graph.
   */
  AddOutputToLeafOps(&result);
  PADDLE_ENFORCE(!ir::HasCircle(result));
  return graph;
}

bool MultiDevSSAGraphBuilder::IsSparseGradient(const std::string &og) const {
  PADDLE_ENFORCE(all_vars_.count(og) != 0);
  if (all_vars_.at(og)->GetType() == proto::VarType::SELECTED_ROWS) {
    return true;
  }
  return false;
}

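// Attach a default device context to `op_handle` for place `p`. With CUDA,
// the default context is only attached when no NCCL context map is available;
// communication op handles that were constructed with `nccl_ctxs_` manage
// their own contexts.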
void MultiDevSSAGraphBuilder::SetCommunicationContext(
    OpHandleBase *op_handle, const platform::Place &p) const {
#ifdef PADDLE_WITH_CUDA
  if (nccl_ctxs_ == nullptr) {
    op_handle->SetDeviceContext(p,
                                platform::DeviceContextPool::Instance().Get(p));
  }
#else
  op_handle->SetDeviceContext(p,
                              platform::DeviceContextPool::Instance().Get(p));
#endif
}

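// Broadcast the latest version of `p_name` from `src_dev_id` to every place,
// creating a new version of the variable on each destination.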
void MultiDevSSAGraphBuilder::CreateBroadcastOp(ir::Graph *result,
                                                const std::string &p_name,
                                                size_t src_dev_id) const {
#ifdef PADDLE_WITH_CUDA
  auto *op_handle = new BroadcastOpHandle(
      result->CreateEmptyNode("broadcast", ir::Node::Type::kOperation),
      local_scopes_, places_, nccl_ctxs_);
#else
  auto *op_handle = new BroadcastOpHandle(
      result->CreateEmptyNode("broadcast", ir::Node::Type::kOperation),
      local_scopes_, places_);
#endif
  result->Get<GraphOps>(kGraphOps).emplace_back(op_handle);

  auto *in =
      result->Get<GraphVars>(kGraphVars).at(src_dev_id).at(p_name).back().get();
  op_handle->AddInput(in);

  for (size_t i = 0; i < places_.size(); ++i) {
    auto &p = places_[i];
    SetCommunicationContext(op_handle, p);
    auto &vars = result->Get<GraphVars>(kGraphVars).at(i).at(p_name);
    auto *out_var = new VarHandle(
        result->CreateEmptyNode(p_name, ir::Node::Type::kVariable), vars.size(),
        i, p_name, p);
    vars.emplace_back(out_var);
    op_handle->AddOutput(out_var);
  }
}

void MultiDevSSAGraphBuilder::CreateComputationalOp(ir::Graph *result,
                                                    ir::Node *node,
                                                    int dev_id) const {
  result->Get<GraphOps>(kGraphOps).emplace_back(
      new ComputationOpHandle(result->CreateOpNode(node->Op()),
                              local_scopes_[dev_id], places_[dev_id]));
  CreateOpHandleIOs(result, node, dev_id);
}

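// Insert an all-reduce op handle that consumes the latest version of the
// gradient `og` on every place and produces a new, reduced version on each.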
void MultiDevSSAGraphBuilder::InsertAllReduceOp(ir::Graph *result,
                                                const std::string &og) const {
#ifdef PADDLE_WITH_CUDA
  result->Get<GraphOps>(kGraphOps).emplace_back(new AllReduceOpHandle(
      result->CreateEmptyNode("allreduce", ir::Node::Type::kOperation),
      local_scopes_, places_, nccl_ctxs_));
#else
  result->Get<GraphOps>(kGraphOps).emplace_back(new AllReduceOpHandle(
      result->CreateEmptyNode("allreduce", ir::Node::Type::kOperation),
      local_scopes_, places_));
#endif
  auto *op_handle = result->Get<GraphOps>(kGraphOps).back().get();

  for (size_t i = 0; i < places_.size(); ++i) {
    auto &p = places_[i];
    SetCommunicationContext(op_handle, p);
    auto &vars = result->Get<GraphVars>(kGraphVars)[i][og];
    PADDLE_ENFORCE(!vars.empty());
    auto &prev_grad = vars.back();
    op_handle->AddInput(prev_grad.get());

    auto var =
        new VarHandle(result->CreateEmptyNode(og, ir::Node::Type::kVariable),
                      vars.size(), i, og, p);
    vars.emplace_back(var);
    op_handle->AddOutput(var);
  }
}

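// Insert a data-balance op handle over the reader outputs so that batches are
// evened out across places when the readers produce uneven amounts of data.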
void MultiDevSSAGraphBuilder::InsertDataBalanceOp(
    ir::Graph *result, const std::vector<std::string> &datas) const {
#ifdef PADDLE_WITH_CUDA
  result->Get<GraphOps>(kGraphOps).emplace_back(new DataBalanceOpHandle(
      result->CreateEmptyNode("data_balance", ir::Node::Type::kOperation),
      local_scopes_, places_, nccl_ctxs_));
#else
  result->Get<GraphOps>(kGraphOps).emplace_back(new DataBalanceOpHandle(
      result->CreateEmptyNode("data_balance", ir::Node::Type::kOperation),
      local_scopes_, places_));
#endif
  auto *op_handle = result->Get<GraphOps>(kGraphOps).back().get();
  for (size_t i = 0; i < places_.size(); ++i) {
    auto &p = places_[i];
    SetCommunicationContext(op_handle, p);
    for (const std::string &d_name : datas) {
      auto &vars = result->Get<GraphVars>(kGraphVars)[i][d_name];
      PADDLE_ENFORCE(!vars.empty());
      op_handle->AddInput(vars.back().get());
      auto var = new VarHandle(
          result->CreateEmptyNode(d_name, ir::Node::Type::kVariable),
          vars.size(), i, d_name, p);
      vars.emplace_back(var);
      op_handle->AddOutput(var);
    }
  }
}

bool MultiDevSSAGraphBuilder::IsParameterGradientOnce(
    const std::string &og,
    std::unordered_set<std::string> *og_has_been_broadcast) const {
  bool is_pg_once =
      grad_names_.count(og) != 0 && og_has_been_broadcast->count(og) == 0;
  if (is_pg_once) {
    // Insert NCCL AllReduce Op
    og_has_been_broadcast->insert(og);
  }
  return is_pg_once;
}

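// Decide which single device an op must run on; returns -1 when the op can be
// replicated on all devices. Only optimize ops under the kReduce strategy are
// pinned, based on the device their gradient was reduced to.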
int MultiDevSSAGraphBuilder::GetOpDeviceID(const ir::Graph &graph,
                                           ir::Node *node) const {
  if (strategy_.reduce_ != BuildStrategy::ReduceStrategy::kReduce) {
    return -1;
  }
  int op_role = boost::get<int>(
      node->Op()->GetAttr(framework::OpProtoAndCheckerMaker::OpRoleAttrName()));
  if (op_role != static_cast<int>(framework::OpRole::kOptimize)) {
    return -1;
  }
  auto param_grad = boost::get<std::vector<std::string>>(
      node->Op()->GetAttr(OpProtoAndCheckerMaker::OpRoleVarAttrName()));

  PADDLE_ENFORCE_EQ(param_grad.size(), 2U);
  int dev_id = GetVarDeviceID(graph, param_grad[1]);
  PADDLE_ENFORCE_NE(dev_id, -1, "dev_id should not be -1.[%s, %s, %s]",
                    node->Op()->Type(), param_grad[0], param_grad[1]);
  return dev_id;
}

int MultiDevSSAGraphBuilder::GetVarDeviceID(const ir::Graph &graph,
                                            const std::string &varname) const {
  auto &sharded_var_device = graph.Get<ShardedVarDevice>(kShardedVarDevice);
  auto got = sharded_var_device.find(varname);
  return got == sharded_var_device.end() ? -1 : got->second;
}

void MultiDevSSAGraphBuilder::CreateScaleLossGradOp(
    ir::Graph *result, const std::string &loss_grad_name) const {
  for (size_t i = 0; i < places_.size(); ++i) {
// Insert ScaleCost OpHandle
#ifdef PADDLE_WITH_CUDA
    auto *communication_dev_ctx =
        nccl_ctxs_ ? nccl_ctxs_->DevCtx(places_[i])
                   : platform::DeviceContextPool::Instance().Get(places_[i]);
#else
    auto *communication_dev_ctx =
        platform::DeviceContextPool::Instance().Get(platform::CPUPlace());
#endif
    auto *op_handle = new ScaleLossGradOpHandle(
        result->CreateEmptyNode("scale_loss_grad", ir::Node::Type::kOperation),
        local_scopes_.size(), local_scopes_[i], places_[i],
        communication_dev_ctx);
    result->Get<GraphOps>(kGraphOps).emplace_back(op_handle);

    // FIXME: Currently ScaleLossGradOp only uses device_count as the scale
    // factor, so it does not depend on any other operators.
    // VarHandle *loss = GetVarHandle(loss_var_name, place);
    // loss->pending_ops_.emplace_back(op_handle);
    // op_handle->inputs_.emplace_back(loss);

    CreateOpOutput(
        result, op_handle,
        result->CreateEmptyNode(loss_grad_name, ir::Node::Type::kVariable),
        places_[i], i);
  }
}

void MultiDevSSAGraphBuilder::CreateComputationalOps(ir::Graph *result,
                                                     ir::Node *node,
                                                     size_t num_places) const {
  for (size_t scope_idx = 0; scope_idx < num_places; ++scope_idx) {
    auto p = places_[scope_idx];
    auto s = local_scopes_[scope_idx];
    result->Get<GraphOps>(kGraphOps).emplace_back(
        new ComputationOpHandle(result->CreateOpNode(node->Op()), s, p));
    CreateOpHandleIOs(result, node, scope_idx);
  }
}

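// Gather the latest version of `og` from every place and reduce it onto
// `dst_dev_id`, producing a new version of the variable there.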
VarHandle *MultiDevSSAGraphBuilder::CreateReduceOp(ir::Graph *result,
                                                   const std::string &og,
                                                   int dst_dev_id) const {
#ifdef PADDLE_WITH_CUDA
  result->Get<GraphOps>(kGraphOps).emplace_back(new ReduceOpHandle(
      result->CreateEmptyNode("reduce", ir::Node::Type::kOperation),
      local_scopes_, places_, nccl_ctxs_));
#else
  result->Get<GraphOps>(kGraphOps).emplace_back(new ReduceOpHandle(
      result->CreateEmptyNode("reduce", ir::Node::Type::kOperation),
      local_scopes_, places_));
#endif
  auto *op_handle = result->Get<GraphOps>(kGraphOps).back().get();

  for (size_t i = 0; i < places_.size(); ++i) {
    auto &p = places_[i];
    SetCommunicationContext(op_handle, p);
    auto &vars = result->Get<GraphVars>(kGraphVars)[i][og];
    PADDLE_ENFORCE(!vars.empty());
    auto &prev_grad = vars.back();
    op_handle->AddInput(prev_grad.get());
  }
  auto &vars = result->Get<GraphVars>(kGraphVars)[dst_dev_id][og];
  auto var =
      new VarHandle(result->CreateEmptyNode(og, ir::Node::Type::kVariable),
                    vars.size(), dst_dev_id, og, places_[dst_dev_id]);
  vars.emplace_back(var);
  op_handle->AddOutput(var);
  return var;
}

// Find every op named `prev_op_name` and make the current `op` depend on it.
void MultiDevSSAGraphBuilder::ConnectOp(ir::Graph *result, OpHandleBase *op,
                                        const std::string &prev_op_name) const {
  for (auto &prev_op : result->Get<GraphOps>(kGraphOps)) {
    if (prev_op->Name() == prev_op_name) {
      auto *dep_var = new DummyVarHandle(result->CreateControlDepVar());
      prev_op->AddOutput(dep_var);
      result->Get<GraphDepVars>(kGraphDepVars).emplace(dep_var);
      op->AddInput(dep_var);
    }
  }
}

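// Place a distributed-training helper op (split_byref / split_selected_rows /
// concat) on a single device chosen from the sharded-variable mapping, and
// record the device of its output variables.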
void MultiDevSSAGraphBuilder::CreateDistTrainOp(ir::Graph *result,
                                                ir::Node *node) const {
  int op_dev_id = -1;
  std::vector<std::string> input_var_names;
  std::vector<std::string> output_var_names;
  for (ir::Node *input : node->inputs) {
    input_var_names.push_back(input->Name());
  }
  for (ir::Node *output : node->outputs) {
    output_var_names.push_back(output->Name());
  }

  if (node->Op()->Type() == "split_byref" ||
      node->Op()->Type() == "split_selected_rows") {
    // TODO(paddle-dev): getting the first var is not safe.
    op_dev_id = GetVarDeviceID(*result, input_var_names[0]);
    if (strategy_.reduce_ == BuildStrategy::ReduceStrategy::kAllReduce) {
      op_dev_id = GetAppropriateDeviceID(input_var_names);
      for (auto &varname : input_var_names) {
        result->Get<ShardedVarDevice>(kShardedVarDevice)
            .emplace(varname, op_dev_id);
      }
    }
    for (auto &varname : output_var_names) {
      result->Get<ShardedVarDevice>(kShardedVarDevice)
          .emplace(varname, op_dev_id);
    }
  } else if (node->Op()->Type() == "concat") {
    op_dev_id = GetVarDeviceID(*result, input_var_names[0]);
    for (auto &varname : output_var_names) {
      result->Get<ShardedVarDevice>(kShardedVarDevice)
          .emplace(varname, op_dev_id);
    }
  } else {
    PADDLE_THROW(
        "the distributed training related op should be one of [split_byref, "
        "concat].");
  }

  PADDLE_ENFORCE(op_dev_id != -1,
                 "cannot find the right place for distributed op: %s",
                 node->Op()->Type());

  CreateComputationalOp(result, node, op_dev_id);
}

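// Add the latest version of each of `node`'s inputs on every place as an
// input of the most recently created op handle.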
void SetOpInputsAllPlaces(ir::Graph *result, ir::Node *node, int num_places) {
  auto *op_handle = result->Get<GraphOps>(kGraphOps).back().get();
  for (ir::Node *input : node->inputs) {
    VarHandle *var = nullptr;
    for (int place_offset = 0; place_offset < num_places; ++place_offset) {
      auto &var_holders = result->Get<GraphVars>(kGraphVars)[place_offset];
      auto &var_holder = var_holders[input->Name()];
      if (!var_holder.empty()) {
        var = var_holder.rbegin()->get();
        op_handle->AddInput(var);
      }
    }
  }
}

// Create RPC related op handles that connect their input ops and output ops.
void MultiDevSSAGraphBuilder::CreateRPCOp(ir::Graph *result,
                                          ir::Node *node) const {
  int op_dev_id = -1;
  if (node->Op()->Type() == "send") {
    // TODO(paddle-dev): getting the first var is not safe.
    op_dev_id = GetVarDeviceID(*result, node->inputs[0]->Name());
    PADDLE_ENFORCE(!ir::IsControlDepVar(*node->inputs[0]),
                   "This hack no longer holds, please fix.");
    // A variable name that contains .block means it was split by the
    // split_byref op.
    if (strategy_.reduce_ == BuildStrategy::ReduceStrategy::kAllReduce &&
        node->inputs[0]->Name().find(".block") == std::string::npos) {
      std::vector<std::string> input_var_names;
      for (ir::Node *n : node->inputs) {
        input_var_names.push_back(n->Name());
      }
      auto send_param_grad = boost::get<std::vector<std::string>>(
          node->Op()->GetAttr(OpProtoAndCheckerMaker::OpRoleVarAttrName()));
      PADDLE_ENFORCE_EQ(send_param_grad.size(), 2U);
      op_dev_id = GetAppropriateDeviceID({send_param_grad[1]});
      VLOG(10) << "send grad " << input_var_names[0] << " origin "
               << send_param_grad[1] << " place: " << op_dev_id;
      for (auto &varname : input_var_names) {
        result->Get<ShardedVarDevice>(kShardedVarDevice)
            .emplace(varname, op_dev_id);
      }
      result->Get<ShardedVarDevice>(kShardedVarDevice)
          .emplace(send_param_grad[1], op_dev_id);
    }
  } else if (node->Op()->Type() == "recv") {
    std::vector<std::string> output_var_names;
    for (ir::Node *n : node->outputs) {
      output_var_names.push_back(n->Name());
    }
    auto recv_param_grad = boost::get<std::vector<std::string>>(
        node->Op()->GetAttr(OpProtoAndCheckerMaker::OpRoleVarAttrName()));
    if (recv_param_grad.size() == 2U) {
      op_dev_id = GetVarDeviceID(*result, recv_param_grad[1]);
      VLOG(10) << "recv param " << recv_param_grad[0]
               << " get grad place: " << recv_param_grad[1]
               << " place: " << op_dev_id;
    } else {
      op_dev_id = GetAppropriateDeviceID(output_var_names);
    }
    for (auto &varname : output_var_names) {
      result->Get<ShardedVarDevice>(kShardedVarDevice)
          .emplace(varname, op_dev_id);
    }
  } else {
    // send_barrier and fetch_barrier will run on place 0.
    op_dev_id = 0;
  }

  PADDLE_ENFORCE(op_dev_id != -1, "cannot find the right place for rpc op: %s",
                 node->Op()->Type());
  result->Get<GraphOps>(kGraphOps).emplace_back(new RPCOpHandle(
      result->CreateOpNode(node->Op()), *node->Op(), local_scopes_[op_dev_id],
      node->Op()->Type(), places_[op_dev_id]));

  if (node->Op()->Type() == "send") {
    CreateOpHandleIOs(result, node, op_dev_id);
  } else {
    // The inputs of send_barrier, recv and fetch_barrier are dependency vars;
    // get them from all places.
    auto p = places_[op_dev_id];
    auto *op_handle = result->Get<GraphOps>(kGraphOps).back().get();
    op_handle->SetDeviceContext(p,
                                platform::DeviceContextPool::Instance().Get(p));

    SetOpInputsAllPlaces(result, node, places_.size());
    for (ir::Node *output : node->outputs) {
      int outvar_dev_id = op_dev_id;
      if (node->Op()->Type() == "fetch_barrier") {
        outvar_dev_id = GetVarDeviceID(*result, output->Name());
        PADDLE_ENFORCE_NE(outvar_dev_id, -1);
      }
      p = places_[outvar_dev_id];
      ir::Node *new_node = nullptr;
      if (output->Var()) {
        new_node = result->CreateVarNode(output->Var());
      } else {
        new_node =
            result->CreateEmptyNode(output->Name(), ir::Node::Type::kVariable);
      }
      CreateOpOutput(result, op_handle, new_node, p, outvar_dev_id);
    }
  }
}

bool MultiDevSSAGraphBuilder::IsScaleLossOp(ir::Node *node) const {
  return boost::get<int>(
             node->Op()->GetAttr(OpProtoAndCheckerMaker::OpRoleAttrName())) ==
             (static_cast<int>(OpRole::kBackward) |
              static_cast<int>(OpRole::kLoss)) &&
         !loss_var_name_.empty();  // If loss_var is empty, this is test mode.
}

}  // namespace details
}  // namespace framework
}  // namespace paddle

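// Illustrative note (an assumption about the surrounding framework, not part
// of this file's logic): a caller such as ParallelExecutor is expected to
// fetch "multi_devices_pass" from the pass registry, attach the attributes
// required below (loss_var_name, places, params, local_scopes, strategy),
// and then apply the pass to the program's ir::Graph.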
REGISTER_PASS(multi_devices_pass,
              paddle::framework::details::MultiDevSSAGraphBuilder)
    .RequirePassAttr(paddle::framework::details::kLossVarName)
    .RequirePassAttr(paddle::framework::details::kPlaces)
    .RequirePassAttr(paddle::framework::details::kParams)
    .RequirePassAttr(paddle::framework::details::kLocalScopes)
    .RequirePassAttr(paddle::framework::details::kStrategy);