//   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <algorithm>
#include <fstream>
#include <string>
#include <utility>
#include <vector>

#include "paddle/fluid/framework/details/all_reduce_op_handle.h"
#include "paddle/fluid/framework/details/broadcast_op_handle.h"
#include "paddle/fluid/framework/details/computation_op_handle.h"
#include "paddle/fluid/framework/details/data_balance_op_handle.h"
#include "paddle/fluid/framework/details/fused_broadcast_op_handle.h"
#include "paddle/fluid/framework/details/multi_devices_graph_pass.h"
#include "paddle/fluid/framework/details/reduce_op_handle.h"
#include "paddle/fluid/framework/details/rpc_op_handle.h"
#include "paddle/fluid/framework/details/scale_loss_grad_op_handle.h"
#include "paddle/fluid/framework/ir/graph_helper.h"
#include "paddle/fluid/framework/ir/node.h"
#include "paddle/fluid/framework/op_info.h"
#include "paddle/fluid/framework/scope.h"

namespace paddle {
namespace framework {
namespace details {
namespace {
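// Add explicit control-dependency variables between the op that writes a new
// version of a variable and all ops that read the previous version, so that
// write-after-read hazards are serialized.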
void PolishGraphToSupportDataHazards(ir::Graph *graph) {
  for (auto &var_map : graph->Get<GraphVars>(kGraphVars)) {
    for (auto &name_pair : var_map) {
      if (name_pair.second.size() <= 1) {
        continue;
      }
      auto it_new = name_pair.second.rbegin();
      auto it_old = name_pair.second.rbegin();
      ++it_old;
      for (; it_old != name_pair.second.rend(); it_new = it_old, ++it_old) {
        OpHandleBase *write_op = (*it_new)->GeneratedOp();
        const auto &read_ops = (*it_old)->PendingOps();

        for (auto *read_op : read_ops) {
          // Manually add a dependency var from read_op to write_op;
          if (read_op == write_op) {
            // Read Write is the same op.
            continue;
          }
          bool has_dep = false;
          for (auto *r_out : read_op->Outputs()) {
            for (auto *w_in : write_op->Inputs()) {
              if (r_out->Node() == w_in->Node()) {
                has_dep = true;
                break;
              }
            }
          }
          if (has_dep) continue;

          auto *dep_var = new DummyVarHandle(graph->CreateControlDepVar());
          read_op->AddOutput(dep_var);
          write_op->AddInput(dep_var);
          graph->Get<GraphDepVars>(kGraphDepVars).emplace(dep_var);
        }
      }
    }
  }
}

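// Return the latest VarHandle of node's variable on the given place, creating
// version 0 first if the variable has no handle there yet.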
VarHandle *CreateOrGetLatestVarHandle(ir::Graph *graph, ir::Node *node,
                                      const platform::Place &place,
                                      size_t place_offset) {
  auto &var_holders = graph->Get<GraphVars>(kGraphVars)[place_offset];
  auto &var_holder = var_holders[node->Name()];
  VarHandle *var = nullptr;
  if (var_holder.empty()) {
    if (node->Var()) {
      var = new VarHandle(graph->CreateVarNode(node->Var()), 0, place_offset,
                          node->Name(), place);
    } else {
      var = new VarHandle(
          graph->CreateEmptyNode(node->Name(), ir::Node::Type::kVariable), 0,
          place_offset, node->Name(), place);
    }
    var_holder.emplace_back(var);
  } else {
    var = var_holder.rbegin()->get();
  }
  return var;
}

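// Create the next version of new_node's variable on the given place and
// register it as an output of op_handle.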
void CreateOpOutput(ir::Graph *graph, OpHandleBase *op_handle,
                    ir::Node *new_node, const platform::Place &place,
                    size_t place_offset) {
  auto &vars =
      graph->Get<GraphVars>(kGraphVars)[place_offset][new_node->Name()];
  size_t version = vars.size();
  auto var =
      new VarHandle(new_node, version, place_offset, new_node->Name(), place);
  vars.emplace_back(var);
  op_handle->AddOutput(var);
}

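// Give every op without outputs a dummy control-dependency output, so that
// only variables are the leaves of the graph.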
void AddOutputToLeafOps(ir::Graph *graph) {
  for (auto &op : graph->Get<GraphOps>(kGraphOps)) {
    if (!op->Outputs().empty()) {
      continue;
    }
    auto *dummy_leaf = new DummyVarHandle(graph->CreateControlDepVar());
    graph->Get<GraphDepVars>(kGraphDepVars).emplace(dummy_leaf);
    op->AddOutput(dummy_leaf);
  }
}
}  // namespace
static const char kLossVarName[] = "loss_var_name";
static const char kPlaces[] = "places";
static const char kParams[] = "params";
static const char kLocalScopes[] = "local_scopes";
static const char kStrategy[] = "strategy";

void MultiDevSSAGraphBuilder::Init() const {
  all_vars_.clear();
  balance_vars_.clear();

  loss_var_name_ = Get<const std::string>(kLossVarName);
  places_ = Get<const std::vector<platform::Place>>(kPlaces);
  local_scopes_ = Get<const std::vector<Scope *>>(kLocalScopes);
  strategy_ = Get<const BuildStrategy>(kStrategy);
#ifdef PADDLE_WITH_CUDA
  nccl_ctxs_ = &Get<platform::NCCLContextMap>("nccl_ctxs");
#endif

  for (auto &p : Get<const std::unordered_set<std::string>>(kParams)) {
    grad_names_.insert(GradVarName(p));
  }
  balance_vars_.resize(places_.size(), 0);
  if (strategy_.enable_data_balance_ && places_.size() == 1) {
    LOG(WARNING) << "There is no need to enable data balance when there is "
                    "only one place. enable_data_balance is set to False.";
    strategy_.enable_data_balance_ = false;
  }
}

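// Connect the most recently created op handle to the variable handles of
// node's inputs and outputs on the given place.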
void MultiDevSSAGraphBuilder::CreateOpHandleIOs(ir::Graph *result,
                                                ir::Node *node,
                                                size_t place_id) const {
  auto p = places_[place_id];
  auto *op_handle = result->Get<GraphOps>(kGraphOps).back().get();
  op_handle->SetDeviceContext(p,
                              platform::DeviceContextPool::Instance().Get(p));

  for (ir::Node *input : node->inputs) {
    VarHandle *var = CreateOrGetLatestVarHandle(result, input, p, place_id);
    op_handle->AddInput(var);
  }

  for (ir::Node *output : node->outputs) {
    ir::Node *new_node = nullptr;
    if (output->Var()) {
      new_node = result->CreateVarNode(output->Var());
    } else {
      new_node =
          result->CreateEmptyNode(output->Name(), ir::Node::Type::kVariable);
    }
    CreateOpOutput(result, op_handle, new_node, p, place_id);
  }
}

std::vector<std::string> MultiDevSSAGraphBuilder::FindDistTrainSendVars(
    const std::vector<ir::Node *> &nodes) const {
  std::vector<std::string> send_vars;
  // Since parameters are all in block 0,
  // it's enough to scan only the send ops in block 0.
  for (auto &node : nodes) {
    OpDesc *op = node->Op();
    // TODO(Yancey1989): use a graceful method to find send op,
    // instead of the hard-coded string.
    if (op->Type() == "send") {
      auto op_vars = op->InputArgumentNames();
      send_vars.reserve(send_vars.size() +
                        std::distance(op_vars.begin(), op_vars.end()));
      send_vars.insert(send_vars.end(), op_vars.begin(), op_vars.end());
    }
  }
  return send_vars;
}

std::vector<std::string> MultiDevSSAGraphBuilder::FindDistTrainRecvVars(
    const std::vector<ir::Node *> &nodes) const {
  std::vector<std::string> recv_vars;
  for (auto &node : nodes) {
    OpDesc *op = node->Op();
    // TODO(Yancey1989): use a graceful method to find recv op,
    // instead of the hard-coded string.
    if (op->Type() == "recv") {
      auto op_vars = op->OutputArgumentNames();
      recv_vars.reserve(recv_vars.size() +
                        std::distance(op_vars.begin(), op_vars.end()));
      recv_vars.insert(recv_vars.end(), op_vars.begin(), op_vars.end());
    }
  }
  return recv_vars;
}

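// Pick the device with the smallest accumulated workload, then charge the
// total element count of var_names to it.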
size_t MultiDevSSAGraphBuilder::GetAppropriateDeviceID(
    const std::vector<std::string> &var_names) const {
  int64_t numel_sum = 0;
  for (auto var_name : var_names) {
    if (all_vars_.find(var_name) == all_vars_.end()) continue;
    auto var_desc = all_vars_.at(var_name);
    PADDLE_ENFORCE_NOT_NULL(var_desc);
    auto dim = framework::make_ddim(var_desc->GetShape());
    int64_t numel = framework::product(dim);
    PADDLE_ENFORCE_GT(numel, 0);
    numel_sum += numel;
  }

  auto smallest =
      std::min_element(std::begin(balance_vars_), std::end(balance_vars_));
  size_t dev_id =
      static_cast<size_t>(std::distance(std::begin(balance_vars_), smallest));
  balance_vars_[dev_id] += numel_sum;
  return dev_id;
}

// Topologically sort the graph nodes from inputs to outputs.
// Since SSAGraphBuilder depends on forward/backward nodes to assign devices
// to parameters/gradients before optimizer ops, a plain topological sort is
// insufficient (some optimizer ops might not depend on any node), so we
// manually move all optimizer nodes after the last backward node.
// However, the assumption by SSAGraphBuilder should be relaxed in the future.
std::vector<ir::Node *> SortOpsAndDelayOptimizeOp(const ir::Graph &graph) {
  std::vector<ir::Node *> ret = ir::TopologySortOperations(graph);
  size_t last_backward = 0;
  for (size_t i = 0; i < ret.size(); ++i) {
    if (boost::get<int>(
            ret[i]->Op()->GetAttr(OpProtoAndCheckerMaker::OpRoleAttrName())) ==
        static_cast<int>(OpRole::kBackward)) {
      last_backward = i;
    }
  }

  std::vector<ir::Node *> optimize_ops;
  std::vector<ir::Node *> sorted_ret;
  for (size_t i = 0; i < ret.size(); ++i) {
    if (i < last_backward) {
      if (static_cast<bool>(boost::get<int>(ret[i]->Op()->GetAttr(
                                OpProtoAndCheckerMaker::OpRoleAttrName())) &
                            static_cast<int>(OpRole::kOptimize))) {
        optimize_ops.push_back(ret[i]);
      } else {
        sorted_ret.push_back(ret[i]);
      }
    } else if (i == last_backward) {
      sorted_ret.push_back(ret[i]);
      // Verify that no operation before the optimize ops depends on them.
      std::unordered_set<ir::Node *> optimize_set(optimize_ops.begin(),
                                                  optimize_ops.end());
      for (ir::Node *n : sorted_ret) {
        for (ir::Node *in : n->inputs) {
          for (ir::Node *pre_n : in->inputs) {
            PADDLE_ENFORCE(optimize_set.find(pre_n) == optimize_set.end(),
                           "optimize operations cannot be depended on by "
                           "forward or backward node %s -> %s",
                           pre_n->Name(), n->Name());
          }
        }
      }
      sorted_ret.insert(sorted_ret.end(), optimize_ops.begin(),
                        optimize_ops.end());
    } else {
      sorted_ret.push_back(ret[i]);
    }
  }
  return sorted_ret;
}

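// The entry of this pass: rebuild the program graph as a multi-device SSA
// graph with computation, communication, and RPC op handles.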
std::unique_ptr<ir::Graph> MultiDevSSAGraphBuilder::ApplyImpl(
    std::unique_ptr<ir::Graph> graph) const {
  Init();
  // Get the topological sort order and rebuild the graph structure.
  std::vector<ir::Node *> sorted_ops = SortOpsAndDelayOptimizeOp(*graph);
  auto nodes = graph->ReleaseNodes();
  ir::Graph &result = *graph;

  for (auto &node : nodes) {
    if (node->IsVar() && node->Var()) {
      all_vars_.emplace(node->Name(), node->Var());
    }
  }
  std::unordered_set<std::string> og_has_been_broadcast;

  // We cannot invoke resize here due to a bug in GCC 4.8.
  result.Set(kGraphVars, new GraphVars(places_.size()));
  result.Set(kGraphDepVars, new GraphDepVars);
  result.Set(kGraphOps, new GraphOps);

  // Find send/recv vars so that we can place the distributed training
  // related ops on place 0.
  auto send_vars = FindDistTrainSendVars(sorted_ops);
  auto recv_vars = FindDistTrainRecvVars(sorted_ops);

  std::vector<std::unordered_set<std::string>> bcast_var_name_set;
  bcast_var_name_set.resize(places_.size());

  size_t cur_device_id = 0;
  bool is_forwarding = true;
  bool is_dist_train = false;

  std::unordered_map<std::string, int> sharded_var_device;

  for (ir::Node *node : sorted_ops) {
    if (boost::get<int>(
            node->Op()->GetAttr(OpProtoAndCheckerMaker::OpRoleAttrName())) ==
        static_cast<int>(OpRole::kRPC)) {
      int op_dev_id = CreateRPCOp(&result, node, &sharded_var_device);
      PADDLE_ENFORCE(op_dev_id != -1,
                     "Can not schedule the RPC operator to the right place.");
      if (node->Op()->Type() == "recv") {
        auto recv_vars_attr =
            boost::get<std::vector<std::string>>(node->Op()->GetNullableAttr(
                OpProtoAndCheckerMaker::OpRoleVarAttrName()));
        PADDLE_ENFORCE(recv_vars_attr.size() == 2UL);  // [parameter, gradient]
        if (recv_vars_attr[0].find(".block") == std::string::npos) {
          bcast_var_name_set[op_dev_id].emplace(recv_vars_attr[0]);
        }
      }
      is_dist_train = true;
    } else if (boost::get<int>(node->Op()->GetAttr(
                   OpProtoAndCheckerMaker::OpRoleAttrName())) ==
               static_cast<int>(OpRole::kDist)) {
      int op_dev_id = CreateDistTrainOp(&result, node, &sharded_var_device);
      if (node->Op()->Type() == "concat") {
        auto origin_param_name = node->Op()->OutputArgumentNames()[0];
        bcast_var_name_set[op_dev_id].emplace(origin_param_name);
      }
    } else if (IsScaleLossOp(node)) {
      // The user can customize loss@grad if use_default_grad_scale_ is not
      // set.
      if (strategy_.gradient_scale_ !=
          BuildStrategy::GradientScaleStrategy::kCustomized) {
        // TODO(paddle-dev): Why is there no input for this op_handle?
        auto loss_grad_name = node->Op()->OutputArgumentNames()[0];
        CreateScaleLossGradOp(&result, loss_grad_name, node->outputs[0]);
      }
      // This assumes the backward-generating code will ensure IsScaleLossOp
      // is true only for the op that scales the final scalar loss.
      // It also assumes the backward op will always follow the forward op
      // in the block.
      is_forwarding = false;
    } else {
      int op_dev_id = GetOpDeviceID(result, node, sharded_var_device);
      if (op_dev_id != -1) {  // This op only runs on one specific device.
        CreateComputationalOp(&result, node, op_dev_id);
        for (ir::Node *n : node->outputs) {
          sharded_var_device.emplace(n->Name(), op_dev_id);
        }
      } else {
        // This op runs on all devices, and its output may have parameters'
        // gradients.
        // TODO(paddle-dev): What is so special about the "read" op?
        if (node->Op()->Type() == "read" && strategy_.enable_data_balance_) {
          node->Op()->SetAttr("throw_eof_exp", false);
          CreateComputationalOps(&result, node, places_.size());
          const auto &data_var_names = node->Op()->Output("Out");
          InsertDataBalanceOp(&result, data_var_names);
        } else {
          CreateComputationalOps(&result, node, places_.size());
        }

        if (!is_forwarding && places_.size() > 1) {
          // Currently, we assume that once gradient is generated, it can be
          // broadcast, and each gradient is only broadcast once.
          if (static_cast<bool>(boost::get<int>(node->Op()->GetAttr(
                                    OpProtoAndCheckerMaker::OpRoleAttrName())) &
                                static_cast<int>(OpRole::kBackward))) {
            try {
              auto backward_vars = boost::get<std::vector<std::string>>(
                  node->Op()->GetNullableAttr(
                      OpProtoAndCheckerMaker::OpRoleVarAttrName()));

              PADDLE_ENFORCE_EQ(backward_vars.size() % 2, 0);
              for (size_t i = 0; i < backward_vars.size(); i += 2) {
                auto &p_name = backward_vars[i];
                auto &g_name = backward_vars[i + 1];
                VLOG(10) << "Bcast " << g_name << " for parameter " << p_name;
                switch (strategy_.reduce_) {
                  case BuildStrategy::ReduceStrategy::kReduce:
                    cur_device_id = GetAppropriateDeviceID({g_name});
                    CreateReduceOp(&result, g_name, cur_device_id);
                    sharded_var_device.emplace(g_name, cur_device_id);
                    if (!is_dist_train) {
                      bcast_var_name_set[cur_device_id].emplace(p_name);
                    }
                    break;
                  case BuildStrategy::ReduceStrategy::kAllReduce:
                    if (IsSparseGradient(g_name)) {
                      CreateReduceOp(&result, g_name, 0);
                      CreateBroadcastOp(&result, g_name, 0);
                    } else {
                      InsertAllReduceOp(&result, g_name);
                    }
                    break;
                  default:
                    LOG(FATAL) << "Unknown reduce strategy ";
                    break;
                }
              }
            } catch (const boost::bad_get &e) {
            }
          }
        }
      }
    }
  }
  bool use_gpu = false;
#ifdef PADDLE_WITH_CUDA
  use_gpu = nccl_ctxs_ != nullptr;
#endif

  // Principles for inserting broadcast operators:
  // 1. Broadcast optimized parameters in the Reduce strategy;
  // 2. No need to broadcast optimized parameters in the AllReduce strategy,
  //    because the optimization sub-graph runs on every GPU;
  // 3. Always broadcast received parameters in distributed training.
  if ((use_gpu &&
       strategy_.reduce_ == BuildStrategy::ReduceStrategy::kReduce) ||
      is_dist_train) {
    if (strategy_.fuse_broadcast_op_) {
      CreateFusedBroadcastOp(&result, bcast_var_name_set);
    } else {
      for (size_t dev_id = 0; dev_id < bcast_var_name_set.size(); ++dev_id) {
        auto &to_bcast_set = bcast_var_name_set[dev_id];
        for (auto &bcast_name : to_bcast_set) {
          CreateBroadcastOp(&result, bcast_name, dev_id);
        }
      }
    }
  }
  /*
   * The dependency graph has been constructed. However, there are still
   * data hazards that need to be handled.
   */
  PolishGraphToSupportDataHazards(&result);
  /*
   * Only variables should be the leaves of graph.
   */
  AddOutputToLeafOps(&result);
  PADDLE_ENFORCE(!ir::HasCircle(result));
  return graph;
}

bool MultiDevSSAGraphBuilder::IsSparseGradient(const std::string &og) const {
  PADDLE_ENFORCE(all_vars_.count(og) != 0);
  if (all_vars_.at(og)->GetType() == proto::VarType::SELECTED_ROWS) {
    return true;
  }
  return false;
}

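// When NCCL is unavailable (or nccl_ctxs_ is null), each op handle needs an
// explicitly assigned device context for the place.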
void MultiDevSSAGraphBuilder::SetCommunicationContext(
    OpHandleBase *op_handle, const platform::Place &p) const {
#ifdef PADDLE_WITH_CUDA
  if (nccl_ctxs_ == nullptr) {
    op_handle->SetDeviceContext(p,
                                platform::DeviceContextPool::Instance().Get(p));
  }
#else
  op_handle->SetDeviceContext(p,
                              platform::DeviceContextPool::Instance().Get(p));
#endif
}

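// Broadcast variable p_name from src_dev_id to all places, creating a new
// variable version on every destination device.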
void MultiDevSSAGraphBuilder::CreateBroadcastOp(ir::Graph *result,
                                                const std::string &p_name,
                                                size_t src_dev_id) const {
#ifdef PADDLE_WITH_CUDA
  auto *op_handle = new BroadcastOpHandle(
      result->CreateEmptyNode("broadcast", ir::Node::Type::kOperation),
      local_scopes_, places_, nccl_ctxs_);
#else
  auto *op_handle = new BroadcastOpHandle(
      result->CreateEmptyNode("broadcast", ir::Node::Type::kOperation),
      local_scopes_, places_);
#endif
  result->Get<GraphOps>(kGraphOps).emplace_back(op_handle);

  auto *in =
      result->Get<GraphVars>(kGraphVars).at(src_dev_id).at(p_name).back().get();
  op_handle->AddInput(in);

  for (size_t i = 0; i < places_.size(); ++i) {
    auto &p = places_[i];
    SetCommunicationContext(op_handle, p);
    auto &vars = result->Get<GraphVars>(kGraphVars).at(i).at(p_name);
    auto *out_var = new VarHandle(
        result->CreateEmptyNode(p_name, ir::Node::Type::kVariable), vars.size(),
        i, p_name, p);
    vars.emplace_back(out_var);
    op_handle->AddOutput(out_var);
  }
}

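// Like CreateBroadcastOp, but fuse the broadcasts of all variables in
// bcast_varnames into a single FusedBroadcastOpHandle.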
void MultiDevSSAGraphBuilder::CreateFusedBroadcastOp(
    ir::Graph *result,
    const std::vector<std::unordered_set<std::string>> &bcast_varnames) const {
#ifdef PADDLE_WITH_CUDA
  auto *op_handle = new FusedBroadcastOpHandle(
      result->CreateEmptyNode("fused_broadcast", ir::Node::Type::kOperation),
      local_scopes_, places_, nccl_ctxs_);
#else
  auto *op_handle = new FusedBroadcastOpHandle(
      result->CreateEmptyNode("fused_broadcast", ir::Node::Type::kOperation),
      local_scopes_, places_);
#endif
  result->Get<GraphOps>(kGraphOps).emplace_back(op_handle);

  for (size_t i = 0; i < places_.size(); ++i) {
    auto &p = places_[i];
    SetCommunicationContext(op_handle, p);
  }

  for (size_t dev_id = 0; dev_id < bcast_varnames.size(); ++dev_id) {
    for (auto &p_name : bcast_varnames[dev_id]) {
      auto *in =
          result->Get<GraphVars>(kGraphVars).at(dev_id).at(p_name).back().get();
      op_handle->AddInput(in);
      for (size_t out_dev_id = 0; out_dev_id < places_.size(); ++out_dev_id) {
        auto &p = places_[out_dev_id];
        auto &vars =
            result->Get<GraphVars>(kGraphVars).at(out_dev_id).at(p_name);
        auto *out_var = new VarHandle(
            result->CreateEmptyNode(p_name, ir::Node::Type::kVariable),
            vars.size(), out_dev_id, p_name, p);
        vars.emplace_back(out_var);
        op_handle->AddOutput(out_var);
      }
    }
  }
}

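// Create a computation op handle for node on the single device dev_id.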
void MultiDevSSAGraphBuilder::CreateComputationalOp(ir::Graph *result,
                                                    ir::Node *node,
                                                    int dev_id) const {
  result->Get<GraphOps>(kGraphOps).emplace_back(
      new ComputationOpHandle(result->CreateOpNode(node->Op()),
                              local_scopes_[dev_id], places_[dev_id]));
  CreateOpHandleIOs(result, node, dev_id);
}

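// Insert an all-reduce op handle that aggregates gradient og across all
// places.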
void MultiDevSSAGraphBuilder::InsertAllReduceOp(ir::Graph *result,
                                                const std::string &og) const {
#ifdef PADDLE_WITH_CUDA
  result->Get<GraphOps>(kGraphOps).emplace_back(new AllReduceOpHandle(
      result->CreateEmptyNode("allreduce", ir::Node::Type::kOperation),
      local_scopes_, places_, nccl_ctxs_));
#else
  result->Get<GraphOps>(kGraphOps).emplace_back(new AllReduceOpHandle(
      result->CreateEmptyNode("allreduce", ir::Node::Type::kOperation),
      local_scopes_, places_));
#endif
  auto *op_handle = result->Get<GraphOps>(kGraphOps).back().get();

  for (size_t i = 0; i < places_.size(); ++i) {
    auto &p = places_[i];
    SetCommunicationContext(op_handle, p);
    auto &vars = result->Get<GraphVars>(kGraphVars)[i][og];
    PADDLE_ENFORCE(!vars.empty());
    auto &prev_grad = vars.back();
    op_handle->AddInput(prev_grad.get());

    auto var =
        new VarHandle(result->CreateEmptyNode(og, ir::Node::Type::kVariable),
                      vars.size(), i, og, p);
    vars.emplace_back(var);
    op_handle->AddOutput(var);
  }
}

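// Insert a data-balance op handle that evens out the input batches of the
// given data variables across places.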
void MultiDevSSAGraphBuilder::InsertDataBalanceOp(
    ir::Graph *result, const std::vector<std::string> &datas) const {
#ifdef PADDLE_WITH_CUDA
  result->Get<GraphOps>(kGraphOps).emplace_back(new DataBalanceOpHandle(
      result->CreateEmptyNode("data_balance", ir::Node::Type::kOperation),
      local_scopes_, places_, nccl_ctxs_));
#else
  result->Get<GraphOps>(kGraphOps).emplace_back(new DataBalanceOpHandle(
      result->CreateEmptyNode("data_balance", ir::Node::Type::kOperation),
      local_scopes_, places_));
#endif
  auto *op_handle = result->Get<GraphOps>(kGraphOps).back().get();
  for (size_t i = 0; i < places_.size(); ++i) {
    auto &p = places_[i];
    SetCommunicationContext(op_handle, p);
    for (const std::string &d_name : datas) {
      auto &vars = result->Get<GraphVars>(kGraphVars)[i][d_name];
      PADDLE_ENFORCE(!vars.empty());
      op_handle->AddInput(vars.back().get());
      auto var = new VarHandle(
          result->CreateEmptyNode(d_name, ir::Node::Type::kVariable),
          vars.size(), i, d_name, p);
      vars.emplace_back(var);
      op_handle->AddOutput(var);
    }
  }
}

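// Under the Reduce strategy, an optimize op must run on the device that owns
// the gradient it consumes; return -1 if the op may run on all devices.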
int MultiDevSSAGraphBuilder::GetOpDeviceID(
    const ir::Graph &graph, ir::Node *node,
    const std::unordered_map<std::string, int> &sharded_var_device) const {
  if (strategy_.reduce_ != BuildStrategy::ReduceStrategy::kReduce) {
    return -1;
  }
  int op_role = boost::get<int>(
      node->Op()->GetAttr(framework::OpProtoAndCheckerMaker::OpRoleAttrName()));
  if (op_role != static_cast<int>(framework::OpRole::kOptimize)) {
    return -1;
  }
  auto param_grad = boost::get<std::vector<std::string>>(
      node->Op()->GetAttr(OpProtoAndCheckerMaker::OpRoleVarAttrName()));

  PADDLE_ENFORCE_EQ(param_grad.size(), 2U);
  int dev_id = GetVarDeviceID(graph, param_grad[1], sharded_var_device);
  PADDLE_ENFORCE_NE(dev_id, -1, "dev_id should not be -1.[%s, %s, %s]",
                    node->Op()->Type(), param_grad[0], param_grad[1]);
  return dev_id;
}

int MultiDevSSAGraphBuilder::GetVarDeviceID(
    const ir::Graph &graph, const std::string &varname,
    const std::unordered_map<std::string, int> &sharded_var_device) const {
  auto got = sharded_var_device.find(varname);
  return got == sharded_var_device.end() ? -1 : got->second;
}

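// Create one ScaleLossGradOpHandle per place; it uses only the device count
// as the scaling factor for loss@grad.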
void MultiDevSSAGraphBuilder::CreateScaleLossGradOp(
    ir::Graph *result, const std::string &loss_grad_name,
    ir::Node *out_var_node) const {
  for (size_t i = 0; i < places_.size(); ++i) {
    // Insert ScaleCost OpHandle
    auto *dev_ctx = platform::DeviceContextPool::Instance().Get(places_[i]);
    auto *op_handle = new ScaleLossGradOpHandle(
        result->CreateEmptyNode("scale_loss_grad", ir::Node::Type::kOperation),
        local_scopes_.size(), local_scopes_[i], places_[i], dev_ctx);
    result->Get<GraphOps>(kGraphOps).emplace_back(op_handle);

    // FIXME: Currently ScaleLossGradOp only uses device_count as the scale
    // factor, so it does not depend on any other operator.
    // VarHandle *loss = GetVarHandle(loss_var_name, place);
    // loss->pending_ops_.emplace_back(op_handle);
    // op_handle->inputs_.emplace_back(loss);

    CreateOpOutput(result, op_handle,
                   result->CreateVarNode(out_var_node->Var()), places_[i], i);
  }
}

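// Replicate node's computation on every place, one op handle per device.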
void MultiDevSSAGraphBuilder::CreateComputationalOps(ir::Graph *result,
                                                     ir::Node *node,
                                                     size_t num_places) const {
  for (size_t scope_idx = 0; scope_idx < num_places; ++scope_idx) {
    auto p = places_[scope_idx];
    auto s = local_scopes_[scope_idx];
    result->Get<GraphOps>(kGraphOps).emplace_back(
        new ComputationOpHandle(result->CreateOpNode(node->Op()), s, p));
    CreateOpHandleIOs(result, node, scope_idx);
  }
}

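// Reduce variable og from all places onto dst_dev_id and return the handle of
// the reduced variable.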
VarHandle *MultiDevSSAGraphBuilder::CreateReduceOp(ir::Graph *result,
                                                   const std::string &og,
                                                   int dst_dev_id) const {
#ifdef PADDLE_WITH_CUDA
  result->Get<GraphOps>(kGraphOps).emplace_back(new ReduceOpHandle(
      result->CreateEmptyNode("reduce", ir::Node::Type::kOperation),
      local_scopes_, places_, nccl_ctxs_));
#else
  result->Get<GraphOps>(kGraphOps).emplace_back(new ReduceOpHandle(
      result->CreateEmptyNode("reduce", ir::Node::Type::kOperation),
      local_scopes_, places_));
#endif
  auto *op_handle = result->Get<GraphOps>(kGraphOps).back().get();

  for (size_t i = 0; i < places_.size(); ++i) {
    auto &p = places_[i];
    SetCommunicationContext(op_handle, p);
    auto &vars = result->Get<GraphVars>(kGraphVars)[i][og];
    PADDLE_ENFORCE(!vars.empty());
    auto &prev_grad = vars.back();
    op_handle->AddInput(prev_grad.get());
  }
  auto &vars = result->Get<GraphVars>(kGraphVars)[dst_dev_id][og];
  auto var =
      new VarHandle(result->CreateEmptyNode(og, ir::Node::Type::kVariable),
                    vars.size(), dst_dev_id, og, places_[dst_dev_id]);
  vars.emplace_back(var);
  op_handle->AddOutput(var);
  return var;
}

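// Place a distributed-training op (split_byref, split_selected_rows,
// split_ids, or concat) on one device and record where its variables live.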
int MultiDevSSAGraphBuilder::CreateDistTrainOp(
    ir::Graph *result, ir::Node *node,
    std::unordered_map<std::string, int> *sharded_var_device) const {
  int op_dev_id = -1;
  std::vector<std::string> input_var_names;
  std::vector<std::string> output_var_names;
  for (ir::Node *input : node->inputs) {
    input_var_names.push_back(input->Name());
  }
  for (ir::Node *output : node->outputs) {
    output_var_names.push_back(output->Name());
  }

  if (node->Op()->Type() == "split_byref" ||
      node->Op()->Type() == "split_selected_rows" ||
      node->Op()->Type() == "split_ids") {
    // TODO(paddle-dev): getting the first var is not safe.
    op_dev_id =
        GetVarDeviceID(*result, input_var_names[0], *sharded_var_device);
    if (strategy_.reduce_ == BuildStrategy::ReduceStrategy::kAllReduce) {
      op_dev_id = GetAppropriateDeviceID(input_var_names);
      for (auto &varname : input_var_names) {
        sharded_var_device->emplace(varname, op_dev_id);
      }
    }
    for (auto &varname : output_var_names) {
      sharded_var_device->emplace(varname, op_dev_id);
    }
  } else if (node->Op()->Type() == "concat") {
    op_dev_id =
        GetVarDeviceID(*result, input_var_names[0], *sharded_var_device);
    for (auto &varname : output_var_names) {
      sharded_var_device->emplace(varname, op_dev_id);
    }
  } else {
    LOG(ERROR) << "got unexpected dist op: " << node->Op()->Type();
    PADDLE_THROW(
        "the distributed training related op should be in [split_byref, "
        "concat].");
  }

  PADDLE_ENFORCE(op_dev_id != -1,
                 "cannot find the right place for distributed op: %s",
                 node->Op()->Type());

  CreateComputationalOp(result, node, op_dev_id);
  return op_dev_id;
}

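// Feed the latest version of each input variable from every place into the
// most recently created op handle.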
void SetOpInputsAllPlaces(ir::Graph *result, ir::Node *node, int num_places) {
  auto *op_handle = result->Get<GraphOps>(kGraphOps).back().get();
  for (ir::Node *input : node->inputs) {
    VarHandle *var = nullptr;
    for (int place_offset = 0; place_offset < num_places; ++place_offset) {
      auto &var_holders = result->Get<GraphVars>(kGraphVars)[place_offset];
      auto &var_holder = var_holders[input->Name()];
      if (!var_holder.empty()) {
        var = var_holder.rbegin()->get();
        op_handle->AddInput(var);
      }
    }
  }
}

// Create RPC related op handles that connect its in ops and out ops.
int MultiDevSSAGraphBuilder::CreateRPCOp(
    ir::Graph *result, ir::Node *node,
    std::unordered_map<std::string, int> *sharded_var_device) const {
  int op_dev_id = -1;
  if (node->Op()->Type() == "send") {
    // TODO(paddle-dev): getting the first var is not safe.
    op_dev_id =
        GetVarDeviceID(*result, node->inputs[0]->Name(), *sharded_var_device);
    PADDLE_ENFORCE(!ir::IsControlDepVar(*node->inputs[0]),
                   "This hack no longer holds, please fix.");
    // A variable name that contains ".block" means it was split by the
    // split_byref op.
    if (strategy_.reduce_ == BuildStrategy::ReduceStrategy::kAllReduce &&
        node->inputs[0]->Name().find(".block") == std::string::npos) {
      std::vector<std::string> input_var_names;
      for (ir::Node *n : node->inputs) {
        input_var_names.push_back(n->Name());
      }
      auto send_param_grad = boost::get<std::vector<std::string>>(
          node->Op()->GetAttr(OpProtoAndCheckerMaker::OpRoleVarAttrName()));
      PADDLE_ENFORCE_EQ(send_param_grad.size(), 2U);
      op_dev_id = GetAppropriateDeviceID({send_param_grad[1]});
      VLOG(10) << "send grad " << input_var_names[0] << " origin "
               << send_param_grad[1] << " place: " << op_dev_id;
      for (auto &varname : input_var_names) {
        sharded_var_device->emplace(varname, op_dev_id);
      }
      sharded_var_device->emplace(send_param_grad[1], op_dev_id);
    }
  } else if (node->Op()->Type() == "recv") {
    std::vector<std::string> output_var_names;
    for (ir::Node *n : node->outputs) {
      output_var_names.push_back(n->Name());
    }
    auto recv_param_grad = boost::get<std::vector<std::string>>(
        node->Op()->GetAttr(OpProtoAndCheckerMaker::OpRoleVarAttrName()));
    if (recv_param_grad.size() == 2U) {
      op_dev_id =
          GetVarDeviceID(*result, recv_param_grad[1], *sharded_var_device);
      VLOG(10) << "recv param " << recv_param_grad[0]
               << " get grad place: " << recv_param_grad[1]
               << " place: " << op_dev_id;
    } else {
      op_dev_id = GetAppropriateDeviceID(output_var_names);
    }
    for (auto &varname : output_var_names) {
      sharded_var_device->emplace(varname, op_dev_id);
    }
  } else {
    // send_barrier and fetch_barrier will run on place 0.
    op_dev_id = 0;
  }

  PADDLE_ENFORCE(op_dev_id != -1, "cannot find the right place for rpc op: %s",
                 node->Op()->Type());
  result->Get<GraphOps>(kGraphOps).emplace_back(new RPCOpHandle(
      result->CreateOpNode(node->Op()), *node->Op(), local_scopes_[op_dev_id],
      node->Op()->Type(), places_[op_dev_id]));

  if (node->Op()->Type() == "send") {
    CreateOpHandleIOs(result, node, op_dev_id);
  } else {
    // send_barrier, recv, and fetch_barrier take dependency vars as inputs;
    // get them from all places.
    auto p = places_[op_dev_id];
    auto *op_handle = result->Get<GraphOps>(kGraphOps).back().get();
    op_handle->SetDeviceContext(p,
                                platform::DeviceContextPool::Instance().Get(p));
    SetOpInputsAllPlaces(result, node, places_.size());
    for (ir::Node *output : node->outputs) {
      int outvar_dev_id = op_dev_id;
      if (node->Op()->Type() == "fetch_barrier") {
        outvar_dev_id =
            GetVarDeviceID(*result, output->Name(), *sharded_var_device);
        PADDLE_ENFORCE_NE(outvar_dev_id, -1);
      }
      p = places_[outvar_dev_id];
      ir::Node *new_node = nullptr;
      if (output->Var()) {
        new_node = result->CreateVarNode(output->Var());
      } else {
        new_node =
            result->CreateEmptyNode(output->Name(), ir::Node::Type::kVariable);
      }
      CreateOpOutput(result, op_handle, new_node, p, outvar_dev_id);
    }
  }
  return op_dev_id;
}

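// The loss-scaling op is the op whose role is exactly kBackward | kLoss.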
bool MultiDevSSAGraphBuilder::IsScaleLossOp(ir::Node *node) const {
  return boost::get<int>(
             node->Op()->GetAttr(OpProtoAndCheckerMaker::OpRoleAttrName())) ==
             (static_cast<int>(OpRole::kBackward) |
              static_cast<int>(OpRole::kLoss)) &&
         !loss_var_name_.empty();  // If loss_var is empty, this is test mode.
}
}  // namespace details
}  // namespace framework
}  // namespace paddle
REGISTER_PASS(multi_devices_pass,
              paddle::framework::details::MultiDevSSAGraphBuilder)
    .RequirePassAttr(paddle::framework::details::kLossVarName)
    .RequirePassAttr(paddle::framework::details::kPlaces)
    .RequirePassAttr(paddle::framework::details::kParams)
    .RequirePassAttr(paddle::framework::details::kLocalScopes)
    .RequirePassAttr(paddle::framework::details::kStrategy);