multi_devices_graph_pass.cc 33.2 KB
Newer Older
Y
Yu Yang 已提交
1 2 3 4 5 6 7 8 9 10 11 12 13
//   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
C
chengduoZH 已提交
14
#include <algorithm>
Y
Yancey1989 已提交
15
#include <fstream>
C
chengduoZH 已提交
16
#include <string>
C
chengduoZH 已提交
17
#include <utility>
C
chengduoZH 已提交
18 19
#include <vector>

20
#include "paddle/fluid/framework/details/all_reduce_op_handle.h"
C
chengduoZH 已提交
21
#include "paddle/fluid/framework/details/broadcast_op_handle.h"
Y
Yu Yang 已提交
22
#include "paddle/fluid/framework/details/computation_op_handle.h"
23
#include "paddle/fluid/framework/details/data_balance_op_handle.h"
X
Xin Pan 已提交
24
#include "paddle/fluid/framework/details/multi_devices_graph_pass.h"
C
chengduoZH 已提交
25
#include "paddle/fluid/framework/details/reduce_op_handle.h"
Y
Yancey1989 已提交
26
#include "paddle/fluid/framework/details/rpc_op_handle.h"
Y
Yu Yang 已提交
27
#include "paddle/fluid/framework/details/scale_loss_grad_op_handle.h"
X
better  
Xin Pan 已提交
28
#include "paddle/fluid/framework/ir/graph_helper.h"
X
Xin Pan 已提交
29
#include "paddle/fluid/framework/ir/node.h"
Y
Fix bug  
yuyang18 已提交
30
#include "paddle/fluid/framework/op_info.h"
Y
Yu Yang 已提交
31
#include "paddle/fluid/framework/scope.h"
Y
Yu Yang 已提交
32

Y
Yu Yang 已提交
33 34 35
namespace paddle {
namespace framework {
namespace details {
X
Xin Pan 已提交
36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121
namespace {
void PolishGraphToSupportDataHazards(ir::Graph *graph) {
  for (auto &var_map : graph->Get<GraphVars>(kGraphVars)) {
    for (auto &name_pair : var_map) {
      if (name_pair.second.size() <= 1) {
        continue;
      }
      auto it_new = name_pair.second.rbegin();
      auto it_old = name_pair.second.rbegin();
      ++it_old;
      for (; it_old != name_pair.second.rend(); it_new = it_old, ++it_old) {
        OpHandleBase *write_op = (*it_new)->GeneratedOp();
        const auto &read_ops = (*it_old)->PendingOps();

        for (auto *read_op : read_ops) {
          // Manually add a dependency var from read_op to write_op;
          if (read_op == write_op) {
            // Read Write is the same op.
            continue;
          }
          bool has_dep = false;
          for (auto *r_out : read_op->Outputs()) {
            for (auto *w_in : write_op->Inputs()) {
              if (r_out->Node() == w_in->Node()) {
                has_dep = true;
                break;
              }
            }
          }
          if (has_dep) continue;

          auto *dep_var = new DummyVarHandle(graph->CreateControlDepVar());
          read_op->AddOutput(dep_var);
          write_op->AddInput(dep_var);
          graph->Get<GraphDepVars>(kGraphDepVars).emplace(dep_var);
        }
      }
    }
  }
}

VarHandle *CreateOrGetLatestVarHandle(ir::Graph *graph, ir::Node *node,
                                      const platform::Place &place,
                                      size_t place_offset) {
  auto &var_holders = graph->Get<GraphVars>(kGraphVars)[place_offset];
  auto &var_holder = var_holders[node->Name()];
  VarHandle *var = nullptr;
  if (var_holder.empty()) {
    if (node->Var()) {
      var = new VarHandle(graph->CreateVarNode(node->Var()), 0, place_offset,
                          node->Name(), place);
    } else {
      var = new VarHandle(
          graph->CreateEmptyNode(node->Name(), ir::Node::Type::kVariable), 0,
          place_offset, node->Name(), place);
    }
    var_holder.emplace_back(var);
  } else {
    var = var_holder.rbegin()->get();
  }
  return var;
}

void CreateOpOutput(ir::Graph *graph, OpHandleBase *op_handle,
                    ir::Node *new_node, const platform::Place &place,
                    size_t place_offset) {
  auto &vars =
      graph->Get<GraphVars>(kGraphVars)[place_offset][new_node->Name()];
  size_t version = vars.size();
  auto var =
      new VarHandle(new_node, version, place_offset, new_node->Name(), place);
  vars.emplace_back(var);
  op_handle->AddOutput(var);
}

void AddOutputToLeafOps(ir::Graph *graph) {
  for (auto &op : graph->Get<GraphOps>(kGraphOps)) {
    if (!op->Outputs().empty()) {
      continue;
    }
    auto *dummy_leaf = new DummyVarHandle(graph->CreateControlDepVar());
    graph->Get<GraphDepVars>(kGraphDepVars).emplace(dummy_leaf);
    op->AddOutput(dummy_leaf);
  }
}
}  // namespace
Y
Yu Yang 已提交
122

X
Xin Pan 已提交
123 124 125 126 127 128
static const char kLossVarName[] = "loss_var_name";
static const char kPlaces[] = "places";
static const char kParams[] = "params";
static const char kLocalScopes[] = "local_scopes";
static const char kStrategy[] = "strategy";

X
Xin Pan 已提交
129
void MultiDevSSAGraphBuilder::Init() const {
X
Xin Pan 已提交
130 131 132 133
  loss_var_name_ = Get<const std::string>(kLossVarName);
  places_ = Get<const std::vector<platform::Place>>(kPlaces);
  local_scopes_ = Get<const std::vector<Scope *>>(kLocalScopes);
  strategy_ = Get<const BuildStrategy>(kStrategy);
Y
Yu Yang 已提交
134
#ifdef PADDLE_WITH_CUDA
X
Xin Pan 已提交
135
  nccl_ctxs_ = &Get<platform::NCCLContextMap>("nccl_ctxs");
Y
Yu Yang 已提交
136
#endif
X
Xin Pan 已提交
137

X
Xin Pan 已提交
138
  for (auto &p : Get<const std::unordered_set<std::string>>(kParams)) {
Y
Yu Yang 已提交
139 140
    grad_names_.insert(GradVarName(p));
  }
Y
Yancey1989 已提交
141
  balance_vars_.resize(places_.size(), 0);
Y
yuyang18 已提交
142 143 144 145 146
  if (strategy_.enable_data_balance_ && places_.size() == 1) {
    LOG(WARNING) << "It is no need to enable data balance when there is only "
                    "one place. enable_data_balance is set to False.";
    strategy_.enable_data_balance_ = false;
  }
Y
Yu Yang 已提交
147 148
}

X
Xin Pan 已提交
149 150
void MultiDevSSAGraphBuilder::CreateOpHandleIOs(ir::Graph *result,
                                                ir::Node *node,
Y
Yu Yang 已提交
151 152
                                                size_t place_id) const {
  auto p = places_[place_id];
X
Xin Pan 已提交
153
  auto *op_handle = result->Get<GraphOps>(kGraphOps).back().get();
X
Xin Pan 已提交
154 155
  op_handle->SetDeviceContext(p,
                              platform::DeviceContextPool::Instance().Get(p));
T
wip  
typhoonzero 已提交
156

157 158
  for (ir::Node *input : node->inputs) {
    VarHandle *var = CreateOrGetLatestVarHandle(result, input, p, place_id);
T
wip  
typhoonzero 已提交
159 160 161
    op_handle->AddInput(var);
  }

162
  for (ir::Node *output : node->outputs) {
X
polish  
Xin Pan 已提交
163 164 165 166 167 168 169 170
    ir::Node *new_node = nullptr;
    if (output->Var()) {
      new_node = result->CreateVarNode(output->Var());
    } else {
      new_node =
          result->CreateEmptyNode(output->Name(), ir::Node::Type::kVariable);
    }
    CreateOpOutput(result, op_handle, new_node, p, place_id);
T
wip  
typhoonzero 已提交
171 172
  }
}
Y
fix pe  
Yancey1989 已提交
173 174

std::vector<std::string> MultiDevSSAGraphBuilder::FindDistTrainSendVars(
X
Xin Pan 已提交
175
    const std::vector<ir::Node *> &nodes) const {
Y
fix pe  
Yancey1989 已提交
176
  std::vector<std::string> send_vars;
Y
Yancey1989 已提交
177 178
  // since parameters are all in block 0,
  // it's enough to only scan send ops in block 0
179 180
  for (auto &node : nodes) {
    OpDesc *op = node->Op();
Y
Yancey1989 已提交
181 182
    // TODO(Yancey1989): use a graceful method to find send op,
    // instead of the the hard code string
183
    if (op->Type() == "send") {
Y
fix pe  
Yancey1989 已提交
184 185 186 187 188 189 190 191 192 193
      auto op_vars = op->InputArgumentNames();
      send_vars.reserve(send_vars.size() +
                        std::distance(op_vars.begin(), op_vars.end()));
      send_vars.insert(send_vars.end(), op_vars.begin(), op_vars.end());
    }
  }
  return send_vars;
}

std::vector<std::string> MultiDevSSAGraphBuilder::FindDistTrainRecvVars(
X
Xin Pan 已提交
194
    const std::vector<ir::Node *> &nodes) const {
Y
fix pe  
Yancey1989 已提交
195
  std::vector<std::string> recv_vars;
196 197
  for (auto &node : nodes) {
    OpDesc *op = node->Op();
Y
Yancey1989 已提交
198 199 200
    // TODO(Yancey1989): use a graceful method to find recv op,
    // instead of the hard code string
    if (op->Type() == "recv") {
Y
fix pe  
Yancey1989 已提交
201 202 203 204 205 206 207 208 209 210
      auto op_vars = op->OutputArgumentNames();
      recv_vars.reserve(recv_vars.size() +
                        std::distance(op_vars.begin(), op_vars.end()));
      recv_vars.insert(recv_vars.end(), op_vars.begin(), op_vars.end());
    }
  }
  return recv_vars;
}

bool MultiDevSSAGraphBuilder::IsDistTrainOp(
211
    ir::Node *node, const std::vector<std::string> &send_vars,
Y
fix pe  
Yancey1989 已提交
212 213
    const std::vector<std::string> &recv_vars) const {
  if (send_vars.size() == 0 || recv_vars.size() == 0) {
T
typhoonzero 已提交
214 215 216
    return false;
  }

Y
Yu Yang 已提交
217 218 219 220
  /**
   * Check any of opvars contains `.block` and in sendvars
   */
  auto checker = [](const std::vector<std::string> &opvars,
Y
fix pe  
Yancey1989 已提交
221
                    const std::vector<std::string> &rpc_vars) -> bool {
T
typhoonzero 已提交
222
    for (auto &var : opvars) {
Y
Yancey1989 已提交
223 224 225
      // a variable name with the suffix `.block` means it's a splited
      // variable by (DistributeTranspiler)
      // [python/paddle/fluid/transpiler/distribute_transpiler.py]
T
typhoonzero 已提交
226
      if (var.find(".block") != std::string::npos &&
Y
fix pe  
Yancey1989 已提交
227
          std::find(rpc_vars.begin(), rpc_vars.end(), var) != rpc_vars.end()) {
Y
Yu Yang 已提交
228
        return true;
T
typhoonzero 已提交
229 230
      }
    }
Y
Yu Yang 已提交
231
    return false;
T
typhoonzero 已提交
232 233
  };

234 235 236
  std::vector<std::string> input_var_names;
  std::vector<std::string> output_var_names;
  for (ir::Node *input : node->inputs) {
X
Xin Pan 已提交
237
    input_var_names.push_back(input->Name());
238 239
  }
  for (ir::Node *output : node->outputs) {
X
Xin Pan 已提交
240
    output_var_names.push_back(output->Name());
241 242 243 244
  }

  return checker(output_var_names, send_vars) ||
         checker(input_var_names, recv_vars);
T
typhoonzero 已提交
245 246
}

Y
Yancey1989 已提交
247 248 249 250
size_t MultiDevSSAGraphBuilder::GetAppropriateDeviceID(
    const std::vector<std::string> &var_names) const {
  int64_t numel_sum = 0;
  for (auto var_name : var_names) {
X
Xin Pan 已提交
251
    if (all_vars_.find(var_name) == all_vars_.end()) continue;
Y
Yancey1989 已提交
252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267
    auto var_desc = all_vars_.at(var_name);
    PADDLE_ENFORCE_NOT_NULL(var_desc);
    auto dim = framework::make_ddim(var_desc->GetShape());
    int64_t numel = framework::product(dim);
    PADDLE_ENFORCE_GT(numel, 0);
    numel_sum += numel;
  }

  auto smallest =
      std::min_element(std::begin(balance_vars_), std::end(balance_vars_));
  size_t dev_id =
      static_cast<size_t>(std::distance(std::begin(balance_vars_), smallest));
  balance_vars_[dev_id] += numel_sum;
  return dev_id;
}

X
better  
Xin Pan 已提交
268 269 270 271 272
// Topology sort the graph nodes from inputs to outputs.
// Since SSAGraphBuilder depends on forward/backward nodes to assign devices
// to parameter/gradients before optimizer ops, topo sort is insufficient. (
// some optimizer ops might not depend on any nodes), we manually move all
// optimizer nodes after last backward nodes.
X
Xin Pan 已提交
273 274 275
// However, the assumption by SSAGraphBuilder should be relaxed in the future.
std::vector<ir::Node *> SortOpsAndDelayOptimizeOp(const ir::Graph &graph) {
  std::vector<ir::Node *> ret = ir::TopologySortOperations(graph);
X
better  
Xin Pan 已提交
276 277 278 279 280
  size_t last_backward = 0;
  for (size_t i = 0; i < ret.size(); ++i) {
    if (boost::get<int>(
            ret[i]->Op()->GetAttr(OpProtoAndCheckerMaker::OpRoleAttrName())) ==
        static_cast<int>(OpRole::kBackward)) {
X
Xin Pan 已提交
281
      last_backward = i;
X
better  
Xin Pan 已提交
282 283 284
    }
  }

X
Xin Pan 已提交
285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309
  std::vector<ir::Node *> optimize_ops;
  std::vector<ir::Node *> sorted_ret;
  for (size_t i = 0; i < ret.size(); ++i) {
    if (i < last_backward) {
      if (boost::get<int>(ret[i]->Op()->GetAttr(
              OpProtoAndCheckerMaker::OpRoleAttrName())) ==
          static_cast<int>(OpRole::kOptimize)) {
        optimize_ops.push_back(ret[i]);
      } else {
        sorted_ret.push_back(ret[i]);
      }
    } else if (i == last_backward) {
      sorted_ret.push_back(ret[i]);
      // Verify that no operations before optimize ops depends on optimize ops.
      std::unordered_set<ir::Node *> optimize_set(optimize_ops.begin(),
                                                  optimize_ops.end());
      for (ir::Node *n : sorted_ret) {
        for (ir::Node *in : n->inputs) {
          for (ir::Node *pre_n : in->inputs) {
            PADDLE_ENFORCE(optimize_set.find(pre_n) == optimize_set.end(),
                           "optimize operations cannot be depended by forward "
                           "or backward node %s -> %s",
                           pre_n->Name(), n->Name());
          }
        }
X
Xin Pan 已提交
310
      }
X
Xin Pan 已提交
311 312 313 314
      sorted_ret.insert(sorted_ret.end(), optimize_ops.begin(),
                        optimize_ops.end());
    } else {
      sorted_ret.push_back(ret[i]);
X
Xin Pan 已提交
315 316
    }
  }
X
better  
Xin Pan 已提交
317 318 319
  return sorted_ret;
}

X
Xin Pan 已提交
320
std::unique_ptr<ir::Graph> MultiDevSSAGraphBuilder::ApplyImpl(
X
Xin Pan 已提交
321
    std::unique_ptr<ir::Graph> graph) const {
X
Xin Pan 已提交
322
  Init();
X
Xin Pan 已提交
323
  // Give the topology sort order and rebuild the graph structure.
X
better  
Xin Pan 已提交
324
  std::vector<ir::Node *> sorted_ops = SortOpsAndDelayOptimizeOp(*graph);
X
Xin Pan 已提交
325 326
  auto nodes = graph->ReleaseNodes();
  ir::Graph &result = *graph;
327 328

  for (auto &node : nodes) {
X
Xin Pan 已提交
329
    if (node->IsVar() && node->Var()) {
X
Xin Pan 已提交
330
      all_vars_.emplace(node->Name(), node->Var());
331
    }
C
fix ci  
chengduoZH 已提交
332
  }
C
chengduoZH 已提交
333
  std::unordered_set<std::string> og_has_been_broadcast;
Y
Yu Yang 已提交
334 335

  // We cannot invoke resize. It is a bug of GCC 4.8
X
Xin Pan 已提交
336 337 338 339
  result.Set(kGraphVars, new GraphVars(places_.size()));
  result.Set(kGraphDepVars, new GraphDepVars);
  result.Set(kGraphOps, new GraphOps);
  result.Set(kShardedVarDevice, new ShardedVarDevice);
340

Y
fix pe  
Yancey1989 已提交
341
  // find send/recv vars so that we can place the distributed training
342
  // related op in the place 0
X
Xin Pan 已提交
343 344
  auto send_vars = FindDistTrainSendVars(sorted_ops);
  auto recv_vars = FindDistTrainRecvVars(sorted_ops);
T
typhoonzero 已提交
345

C
chengduoZH 已提交
346 347 348
  std::vector<std::unordered_set<std::string>> bcast_var_name_set;
  bcast_var_name_set.resize(places_.size());

C
chengduoZH 已提交
349
  size_t cur_device_id = 0;
Y
Yu Yang 已提交
350
  bool is_forwarding = true;
Y
Yancey1989 已提交
351
  bool is_dist_train = false;
352

X
better  
Xin Pan 已提交
353
  for (ir::Node *node : sorted_ops) {
Y
Yancey1989 已提交
354
    if (boost::get<int>(
355
            node->Op()->GetAttr(OpProtoAndCheckerMaker::OpRoleAttrName())) ==
Y
Yancey1989 已提交
356
        static_cast<int>(OpRole::kRPC)) {
Y
Yancey1989 已提交
357 358 359 360 361 362 363 364 365 366 367 368 369
      int op_dev_id = CreateRPCOp(&result, node);
      PADDLE_ENFORCE(op_dev_id != -1,
                     "Can not schedule the RPC operator to the right place.");
      if (node->Op()->Type() == "recv") {
        auto recv_vars_attr =
            boost::get<std::vector<std::string>>(node->Op()->GetNullableAttr(
                OpProtoAndCheckerMaker::OpRoleVarAttrName()));
        PADDLE_ENFORCE(recv_vars_attr.size() == 2UL);  // [parameter, gradient]
        if (recv_vars_attr[0].find(".block") == std::string::npos) {
          bcast_var_name_set[op_dev_id].emplace(recv_vars_attr[0]);
        }
      }
      is_dist_train = true;
X
Xin Pan 已提交
370
    } else if (IsDistTrainOp(node, send_vars, recv_vars)) {
Y
Yancey1989 已提交
371 372 373 374 375
      int op_dev_id = CreateDistTrainOp(&result, node);
      if (node->Op()->Type() == "concat") {
        auto origin_param_name = node->Op()->OutputArgumentNames()[0];
        bcast_var_name_set[op_dev_id].emplace(origin_param_name);
      }
X
Xin Pan 已提交
376
    } else if (IsScaleLossOp(node)) {
Y
Yu Yang 已提交
377
      // user can customize loss@grad if not use_default_grad_scale_
Y
yuyang18 已提交
378 379
      if (strategy_.gradient_scale_ !=
          BuildStrategy::GradientScaleStrategy::kCustomized) {
X
Xin Pan 已提交
380
        // TODO(paddle-dev): Why is there no input for this op_handle?
381 382
        auto loss_grad_name = node->Op()->OutputArgumentNames()[0];
        CreateScaleLossGradOp(&result, loss_grad_name);
Y
Yu Yang 已提交
383
      }
384 385 386 387
      // This assumes the backward generating code will ensure IsScaleLossOp
      // is true only for the op that scale the final scalar loss.
      // It also assumes backward op will always follow the forward op in
      // the block.
Y
Yu Yang 已提交
388
      is_forwarding = false;
Y
Yu Yang 已提交
389
    } else {
X
Xin Pan 已提交
390
      int op_dev_id = GetOpDeviceID(result, node);
C
chengduo 已提交
391
      if (op_dev_id != -1) {  // This op only runs on one specific device.
X
Xin Pan 已提交
392
        CreateComputationalOp(&result, node, op_dev_id);
393
        for (ir::Node *n : node->outputs) {
X
Xin Pan 已提交
394
          graph->Get<ShardedVarDevice>(kShardedVarDevice)
X
Xin Pan 已提交
395
              .emplace(n->Name(), op_dev_id);
C
chengduoZH 已提交
396
        }
C
chengduo 已提交
397 398 399
      } else {
        // This op runs on all devices, and its output may have parameter's
        // gradients.
X
Xin Pan 已提交
400
        // TODO(paddle-dev): Why is so special about "read" op?
401 402
        if (node->Op()->Type() == "read" && strategy_.enable_data_balance_) {
          node->Op()->SetAttr("throw_eof_exp", false);
X
Xin Pan 已提交
403
          CreateComputationalOps(&result, node, places_.size());
404
          const auto &data_var_names = node->Op()->Output("Out");
405
          InsertDataBalanceOp(&result, data_var_names);
F
fengjiayi 已提交
406
        } else {
X
Xin Pan 已提交
407
          CreateComputationalOps(&result, node, places_.size());
408 409
        }

C
chengduo 已提交
410 411 412
        if (!is_forwarding && places_.size() > 1) {
          // Currently, we assume that once gradient is generated, it can be
          // broadcast, and each gradient is only broadcast once.
413
          if (static_cast<bool>(boost::get<int>(node->Op()->GetAttr(
C
chengduo 已提交
414 415 416
                                    OpProtoAndCheckerMaker::OpRoleAttrName())) &
                                static_cast<int>(OpRole::kBackward))) {
            try {
417 418
              auto backward_vars = boost::get<std::vector<std::string>>(
                  node->Op()->GetNullableAttr(
C
chengduo 已提交
419
                      OpProtoAndCheckerMaker::OpRoleVarAttrName()));
Y
yuyang18 已提交
420

C
chengduo 已提交
421
              PADDLE_ENFORCE_EQ(backward_vars.size() % 2, 0);
Y
yuyang18 已提交
422

C
chengduo 已提交
423 424 425 426
              for (size_t i = 0; i < backward_vars.size(); i += 2) {
                auto &p_name = backward_vars[i];
                auto &g_name = backward_vars[i + 1];
                VLOG(10) << "Bcast " << g_name << " for parameter " << p_name;
Y
yuyang18 已提交
427

C
chengduo 已提交
428 429 430 431
                switch (strategy_.reduce_) {
                  case BuildStrategy::ReduceStrategy::kReduce:
                    cur_device_id = GetAppropriateDeviceID({g_name});
                    CreateReduceOp(&result, g_name, cur_device_id);
X
Xin Pan 已提交
432
                    graph->Get<ShardedVarDevice>(kShardedVarDevice)
X
Xin Pan 已提交
433
                        .emplace(g_name, cur_device_id);
Y
Yancey1989 已提交
434 435 436
                    if (!is_dist_train) {
                      bcast_var_name_set[cur_device_id].emplace(p_name);
                    }
C
chengduo 已提交
437 438 439 440 441 442 443 444 445 446 447 448 449
                    break;
                  case BuildStrategy::ReduceStrategy::kAllReduce:
                    if (IsSparseGradient(g_name)) {
                      CreateReduceOp(&result, g_name, 0);
                      CreateBroadcastOp(&result, g_name, 0);
                    } else {
                      InsertAllReduceOp(&result, g_name);
                    }
                    break;
                  default:
                    LOG(FATAL) << "Unknown reduce strategy ";
                    break;
                }
Y
yuyang18 已提交
450
              }
C
chengduo 已提交
451
            } catch (boost::bad_get e) {
C
chengduoZH 已提交
452
            }
Y
Yu Yang 已提交
453 454 455 456 457
          }
        }
      }
    }
  }
458 459 460 461 462
  bool use_gpu = false;
#ifdef PADDLE_WITH_CUDA
  use_gpu = nccl_ctxs_ != nullptr;
#endif

Y
Yancey1989 已提交
463 464 465
  if ((use_gpu &&
       strategy_.reduce_ == BuildStrategy::ReduceStrategy::kReduce) ||
      is_dist_train) {
Y
Yancey1989 已提交
466
    // allways broadcast receieved parameters for distributed training
467 468 469 470 471
    for (size_t dev_id = 0; dev_id < bcast_var_name_set.size(); ++dev_id) {
      auto &to_bcast_set = bcast_var_name_set[dev_id];
      for (auto &bcast_name : to_bcast_set) {
        CreateBroadcastOp(&result, bcast_name, dev_id);
      }
C
chengduoZH 已提交
472 473
    }
  }
Y
Yu Yang 已提交
474
  /*
X
Xin Pan 已提交
475 476 477
  Dependency graph has been constructed. However, there are still data
  hazards need to be handled.
 */
Y
Yu Yang 已提交
478
  PolishGraphToSupportDataHazards(&result);
Y
Yu Yang 已提交
479

Y
Yu Yang 已提交
480 481 482 483
  /*
   * Only variables should be the leaves of graph.
   */
  AddOutputToLeafOps(&result);
X
Xin Pan 已提交
484
  PADDLE_ENFORCE(!ir::HasCircle(result));
Q
qiaolongfei 已提交
485
  return graph;
Y
Yu Yang 已提交
486 487
}

Y
Yancey1989 已提交
488 489 490
bool MultiDevSSAGraphBuilder::IsSparseGradient(const std::string &og) const {
  PADDLE_ENFORCE(all_vars_.count(og) != 0);
  if (all_vars_.at(og)->GetType() == proto::VarType::SELECTED_ROWS) {
C
fix ci  
chengduoZH 已提交
491 492 493
    return true;
  }
  return false;
494 495
}

496 497 498 499 500 501 502 503 504 505 506 507 508
void MultiDevSSAGraphBuilder::SetCommunicationContext(
    OpHandleBase *op_handle, const platform::Place &p) const {
#ifdef PADDLE_WITH_CUDA
  if (nccl_ctxs_ == nullptr) {
    op_handle->SetDeviceContext(p,
                                platform::DeviceContextPool::Instance().Get(p));
  }
#else
  op_handle->SetDeviceContext(p,
                              platform::DeviceContextPool::Instance().Get(p));
#endif
}

X
Xin Pan 已提交
509
void MultiDevSSAGraphBuilder::CreateBroadcastOp(ir::Graph *result,
C
chengduoZH 已提交
510
                                                const std::string &p_name,
C
chengduoZH 已提交
511
                                                size_t src_dev_id) const {
C
chengduoZH 已提交
512
#ifdef PADDLE_WITH_CUDA
X
polish  
Xin Pan 已提交
513 514 515
  auto *op_handle = new BroadcastOpHandle(
      result->CreateEmptyNode("broadcast", ir::Node::Type::kOperation),
      local_scopes_, places_, nccl_ctxs_);
C
chengduoZH 已提交
516
#else
X
polish  
Xin Pan 已提交
517 518 519
  auto *op_handle = new BroadcastOpHandle(
      result->CreateEmptyNode("broadcast", ir::Node::Type::kOperation),
      local_scopes_, places_);
C
chengduoZH 已提交
520
#endif
X
Xin Pan 已提交
521
  result->Get<GraphOps>(kGraphOps).emplace_back(op_handle);
X
Xin Pan 已提交
522

X
Xin Pan 已提交
523
  auto *in =
X
Xin Pan 已提交
524
      result->Get<GraphVars>(kGraphVars).at(src_dev_id).at(p_name).back().get();
C
chengduoZH 已提交
525 526 527 528
  op_handle->AddInput(in);

  for (size_t i = 0; i < places_.size(); ++i) {
    auto &p = places_[i];
C
chengduoZH 已提交
529
    SetCommunicationContext(op_handle, p);
X
Xin Pan 已提交
530
    auto &vars = result->Get<GraphVars>(kGraphVars).at(i).at(p_name);
X
polish  
Xin Pan 已提交
531 532 533
    auto *out_var = new VarHandle(
        result->CreateEmptyNode(p_name, ir::Node::Type::kVariable), vars.size(),
        i, p_name, p);
C
chengduoZH 已提交
534 535 536 537 538
    vars.emplace_back(out_var);
    op_handle->AddOutput(out_var);
  }
}

X
Xin Pan 已提交
539
void MultiDevSSAGraphBuilder::CreateComputationalOp(ir::Graph *result,
540
                                                    ir::Node *node,
C
chengduoZH 已提交
541
                                                    int dev_id) const {
X
Xin Pan 已提交
542
  result->Get<GraphOps>(kGraphOps).emplace_back(
X
Xin Pan 已提交
543
      new ComputationOpHandle(result->CreateOpNode(node->Op()),
544 545
                              local_scopes_[dev_id], places_[dev_id]));
  CreateOpHandleIOs(result, node, dev_id);
C
chengduoZH 已提交
546 547
}

X
Xin Pan 已提交
548
void MultiDevSSAGraphBuilder::InsertAllReduceOp(ir::Graph *result,
C
chengduoZH 已提交
549
                                                const std::string &og) const {
Y
Yu Yang 已提交
550
#ifdef PADDLE_WITH_CUDA
X
Xin Pan 已提交
551
  result->Get<GraphOps>(kGraphOps).emplace_back(new AllReduceOpHandle(
X
polish  
Xin Pan 已提交
552 553
      result->CreateEmptyNode("allreduce", ir::Node::Type::kOperation),
      local_scopes_, places_, nccl_ctxs_));
C
chengduoZH 已提交
554
#else
X
Xin Pan 已提交
555
  result->Get<GraphOps>(kGraphOps).emplace_back(new AllReduceOpHandle(
X
polish  
Xin Pan 已提交
556 557
      result->CreateEmptyNode("allreduce", ir::Node::Type::kOperation),
      local_scopes_, places_));
C
chengduoZH 已提交
558
#endif
X
Xin Pan 已提交
559
  auto *op_handle = result->Get<GraphOps>(kGraphOps).back().get();
Y
Yu Yang 已提交
560 561 562

  for (size_t i = 0; i < places_.size(); ++i) {
    auto &p = places_[i];
C
chengduoZH 已提交
563
    SetCommunicationContext(op_handle, p);
X
Xin Pan 已提交
564
    auto &vars = result->Get<GraphVars>(kGraphVars)[i][og];
Y
Yu Yang 已提交
565 566
    PADDLE_ENFORCE(!vars.empty());
    auto &prev_grad = vars.back();
Y
Yu Yang 已提交
567 568
    op_handle->AddInput(prev_grad.get());

X
Xin Pan 已提交
569
    auto var =
X
polish  
Xin Pan 已提交
570 571
        new VarHandle(result->CreateEmptyNode(og, ir::Node::Type::kVariable),
                      vars.size(), i, og, p);
Y
Yu Yang 已提交
572 573 574 575 576
    vars.emplace_back(var);
    op_handle->AddOutput(var);
  }
}

577
void MultiDevSSAGraphBuilder::InsertDataBalanceOp(
X
Xin Pan 已提交
578
    ir::Graph *result, const std::vector<std::string> &datas) const {
F
fengjiayi 已提交
579
#ifdef PADDLE_WITH_CUDA
X
Xin Pan 已提交
580
  result->Get<GraphOps>(kGraphOps).emplace_back(new DataBalanceOpHandle(
X
polish  
Xin Pan 已提交
581 582
      result->CreateEmptyNode("data_balance", ir::Node::Type::kOperation),
      local_scopes_, places_, nccl_ctxs_));
F
fengjiayi 已提交
583
#else
X
Xin Pan 已提交
584
  result->Get<GraphOps>(kGraphOps).emplace_back(new DataBalanceOpHandle(
X
polish  
Xin Pan 已提交
585 586
      result->CreateEmptyNode("data_balance", ir::Node::Type::kOperation),
      local_scopes_, places_));
F
fengjiayi 已提交
587
#endif
X
Xin Pan 已提交
588
  auto *op_handle = result->Get<GraphOps>(kGraphOps).back().get();
589 590 591 592
  for (size_t i = 0; i < places_.size(); ++i) {
    auto &p = places_[i];
    SetCommunicationContext(op_handle, p);
    for (const std::string &d_name : datas) {
X
Xin Pan 已提交
593
      auto &vars = result->Get<GraphVars>(kGraphVars)[i][d_name];
594 595
      PADDLE_ENFORCE(!vars.empty());
      op_handle->AddInput(vars.back().get());
X
polish  
Xin Pan 已提交
596 597 598
      auto var = new VarHandle(
          result->CreateEmptyNode(d_name, ir::Node::Type::kVariable),
          vars.size(), i, d_name, p);
599 600 601 602 603 604
      vars.emplace_back(var);
      op_handle->AddOutput(var);
    }
  }
}

X
Xin Pan 已提交
605 606
int MultiDevSSAGraphBuilder::GetOpDeviceID(const ir::Graph &graph,
                                           ir::Node *node) const {
Y
yuyang18 已提交
607
  if (strategy_.reduce_ != BuildStrategy::ReduceStrategy::kReduce) {
C
chengduoZH 已提交
608 609
    return -1;
  }
610
  int op_role = boost::get<int>(
611
      node->Op()->GetAttr(framework::OpProtoAndCheckerMaker::OpRoleAttrName()));
612 613
  if (op_role != static_cast<int>(framework::OpRole::kOptimize)) {
    return -1;
C
chengduoZH 已提交
614
  }
615
  auto param_grad = boost::get<std::vector<std::string>>(
X
Xin Pan 已提交
616
      node->Op()->GetAttr(OpProtoAndCheckerMaker::OpRoleVarAttrName()));
617 618

  PADDLE_ENFORCE_EQ(param_grad.size(), 2U);
X
Xin Pan 已提交
619
  int dev_id = GetVarDeviceID(graph, param_grad[1]);
X
Xin Pan 已提交
620 621
  PADDLE_ENFORCE_NE(dev_id, -1, "dev_id should not be -1.[%s, %s, %s]",
                    node->Op()->Type(), param_grad[0], param_grad[1]);
622
  return dev_id;
623 624
}

X
Xin Pan 已提交
625 626
int MultiDevSSAGraphBuilder::GetVarDeviceID(const ir::Graph &graph,
                                            const std::string &varname) const {
X
Xin Pan 已提交
627
  auto &sharded_var_device = graph.Get<ShardedVarDevice>(kShardedVarDevice);
X
Xin Pan 已提交
628 629
  auto got = sharded_var_device.find(varname);
  return got == sharded_var_device.end() ? -1 : got->second;
C
chengduoZH 已提交
630 631
}

632 633
void MultiDevSSAGraphBuilder::CreateScaleLossGradOp(
    ir::Graph *result, const std::string &loss_grad_name) const {
Y
Yu Yang 已提交
634
  for (size_t i = 0; i < places_.size(); ++i) {
Y
yuyang18 已提交
635 636
    // Insert ScaleCost OpHandle
    auto *dev_ctx = platform::DeviceContextPool::Instance().Get(places_[i]);
X
Xin Pan 已提交
637
    auto *op_handle = new ScaleLossGradOpHandle(
X
polish  
Xin Pan 已提交
638
        result->CreateEmptyNode("scale_loss_grad", ir::Node::Type::kOperation),
Y
yuyang18 已提交
639
        local_scopes_.size(), local_scopes_[i], places_[i], dev_ctx);
X
Xin Pan 已提交
640
    result->Get<GraphOps>(kGraphOps).emplace_back(op_handle);
Y
Yu Yang 已提交
641 642 643 644 645 646 647

    // FIXME: Currently ScaleLossGradOp only use device_count as scale
    // factor. So it does not depend on any other operators.
    // VarHandle *loss = GetVarHandle(loss_var_name, place);
    // loss->pending_ops_.emplace_back(op_handle);
    // op_handle->inputs_.emplace_back(loss);

648 649 650 651
    CreateOpOutput(
        result, op_handle,
        result->CreateEmptyNode(loss_grad_name, ir::Node::Type::kVariable),
        places_[i], i);
Y
Yu Yang 已提交
652 653 654
  }
}

X
Xin Pan 已提交
655
void MultiDevSSAGraphBuilder::CreateComputationalOps(ir::Graph *result,
656
                                                     ir::Node *node,
T
typhoonzero 已提交
657 658
                                                     size_t num_places) const {
  for (size_t scope_idx = 0; scope_idx < num_places; ++scope_idx) {
Y
Yu Yang 已提交
659 660
    auto p = places_[scope_idx];
    auto s = local_scopes_[scope_idx];
X
Xin Pan 已提交
661
    result->Get<GraphOps>(kGraphOps).emplace_back(
X
Xin Pan 已提交
662
        new ComputationOpHandle(result->CreateOpNode(node->Op()), s, p));
663
    CreateOpHandleIOs(result, node, scope_idx);
Y
Yu Yang 已提交
664 665 666
  }
}

X
Xin Pan 已提交
667
VarHandle *MultiDevSSAGraphBuilder::CreateReduceOp(ir::Graph *result,
C
chengduoZH 已提交
668 669
                                                   const std::string &og,
                                                   int dst_dev_id) const {
C
chengduoZH 已提交
670
#ifdef PADDLE_WITH_CUDA
X
Xin Pan 已提交
671
  result->Get<GraphOps>(kGraphOps).emplace_back(new ReduceOpHandle(
X
polish  
Xin Pan 已提交
672 673
      result->CreateEmptyNode("reduce", ir::Node::Type::kOperation),
      local_scopes_, places_, nccl_ctxs_));
C
chengduoZH 已提交
674
#else
X
Xin Pan 已提交
675
  result->Get<GraphOps>(kGraphOps).emplace_back(new ReduceOpHandle(
X
polish  
Xin Pan 已提交
676 677
      result->CreateEmptyNode("reduce", ir::Node::Type::kOperation),
      local_scopes_, places_));
C
chengduoZH 已提交
678
#endif
X
Xin Pan 已提交
679
  auto *op_handle = result->Get<GraphOps>(kGraphOps).back().get();
C
chengduoZH 已提交
680 681 682

  for (size_t i = 0; i < places_.size(); ++i) {
    auto &p = places_[i];
C
chengduoZH 已提交
683
    SetCommunicationContext(op_handle, p);
X
Xin Pan 已提交
684
    auto &vars = result->Get<GraphVars>(kGraphVars)[i][og];
C
chengduoZH 已提交
685 686 687 688
    PADDLE_ENFORCE(!vars.empty());
    auto &prev_grad = vars.back();
    op_handle->AddInput(prev_grad.get());
  }
X
Xin Pan 已提交
689
  auto &vars = result->Get<GraphVars>(kGraphVars)[dst_dev_id][og];
X
polish  
Xin Pan 已提交
690 691 692
  auto var =
      new VarHandle(result->CreateEmptyNode(og, ir::Node::Type::kVariable),
                    vars.size(), dst_dev_id, og, places_[dst_dev_id]);
C
chengduoZH 已提交
693 694 695 696 697
  vars.emplace_back(var);
  op_handle->AddOutput(var);
  return var;
}

Y
Yancey1989 已提交
698 699
int MultiDevSSAGraphBuilder::CreateDistTrainOp(ir::Graph *result,
                                               ir::Node *node) const {
Y
Yancey1989 已提交
700
  int op_dev_id = -1;
701 702 703
  std::vector<std::string> input_var_names;
  std::vector<std::string> output_var_names;
  for (ir::Node *input : node->inputs) {
X
Xin Pan 已提交
704
    input_var_names.push_back(input->Name());
705 706
  }
  for (ir::Node *output : node->outputs) {
X
Xin Pan 已提交
707
    output_var_names.push_back(output->Name());
708 709 710 711
  }

  if (node->Op()->Type() == "split_byref" ||
      node->Op()->Type() == "split_selected_rows") {
X
Xin Pan 已提交
712
    // TODO(paddle-dev): getting the first var is not safe.
X
Xin Pan 已提交
713
    op_dev_id = GetVarDeviceID(*result, input_var_names[0]);
Y
Yancey1989 已提交
714
    if (strategy_.reduce_ == BuildStrategy::ReduceStrategy::kAllReduce) {
715 716
      op_dev_id = GetAppropriateDeviceID(input_var_names);
      for (auto &varname : input_var_names) {
X
Xin Pan 已提交
717
        result->Get<ShardedVarDevice>(kShardedVarDevice)
X
Xin Pan 已提交
718
            .emplace(varname, op_dev_id);
Y
Yancey1989 已提交
719 720
      }
    }
721
    for (auto &varname : output_var_names) {
X
Xin Pan 已提交
722
      result->Get<ShardedVarDevice>(kShardedVarDevice)
X
Xin Pan 已提交
723
          .emplace(varname, op_dev_id);
Y
Yancey1989 已提交
724
    }
725
  } else if (node->Op()->Type() == "concat") {
X
Xin Pan 已提交
726
    op_dev_id = GetVarDeviceID(*result, input_var_names[0]);
727
    for (auto &varname : output_var_names) {
X
Xin Pan 已提交
728
      result->Get<ShardedVarDevice>(kShardedVarDevice)
X
Xin Pan 已提交
729
          .emplace(varname, op_dev_id);
Y
yi.wu 已提交
730
    }
Y
Yancey1989 已提交
731
  } else {
W
Wu Yi 已提交
732
    PADDLE_THROW(
Y
Yancey1989 已提交
733 734 735 736 737
        "the distribute training related op should be in [split_byref, "
        "concat].");
  }

  PADDLE_ENFORCE(op_dev_id != -1,
738 739
                 "can not find right place for distributed op: %s",
                 node->Op()->Type());
Y
Yancey1989 已提交
740

741
  CreateComputationalOp(result, node, op_dev_id);
Y
Yancey1989 已提交
742
  return op_dev_id;
W
Wu Yi 已提交
743 744 745 746 747 748 749 750 751 752 753 754 755 756
}

void SetOpInputsAllPlaces(ir::Graph *result, ir::Node *node, int num_places) {
  auto *op_handle = result->Get<GraphOps>(kGraphOps).back().get();
  for (ir::Node *input : node->inputs) {
    VarHandle *var = nullptr;
    for (int place_offset = 0; place_offset < num_places; ++place_offset) {
      auto &var_holders = result->Get<GraphVars>(kGraphVars)[place_offset];
      auto &var_holder = var_holders[input->Name()];
      if (!var_holder.empty()) {
        var = var_holder.rbegin()->get();
        op_handle->AddInput(var);
      }
    }
Y
Yancey1989 已提交
757 758 759
  }
}

760
// Create RPC related op handles that connects its in ops and out ops.
Y
Yancey1989 已提交
761 762
int MultiDevSSAGraphBuilder::CreateRPCOp(ir::Graph *result,
                                         ir::Node *node) const {
Y
Yancey1989 已提交
763
  int op_dev_id = -1;
764
  if (node->Op()->Type() == "send") {
X
Xin Pan 已提交
765
    // TODO(paddle-dev): getting the first var is not safe.
X
Xin Pan 已提交
766
    op_dev_id = GetVarDeviceID(*result, node->inputs[0]->Name());
X
Xin Pan 已提交
767 768
    PADDLE_ENFORCE(!ir::IsControlDepVar(*node->inputs[0]),
                   "This hack no longer holds, please fix.");
Y
Yancey1989 已提交
769 770 771
    // the variable name which contains .block means it was splited by
    // split_byref op
    if (strategy_.reduce_ == BuildStrategy::ReduceStrategy::kAllReduce &&
X
Xin Pan 已提交
772
        node->inputs[0]->Name().find(".block") == std::string::npos) {
773 774
      std::vector<std::string> input_var_names;
      for (ir::Node *n : node->inputs) {
X
Xin Pan 已提交
775
        input_var_names.push_back(n->Name());
776
      }
W
Wu Yi 已提交
777 778 779 780 781 782
      auto send_param_grad = boost::get<std::vector<std::string>>(
          node->Op()->GetAttr(OpProtoAndCheckerMaker::OpRoleVarAttrName()));
      PADDLE_ENFORCE_EQ(send_param_grad.size(), 2U);
      op_dev_id = GetAppropriateDeviceID({send_param_grad[1]});
      VLOG(10) << "send grad " << input_var_names[0] << " origin "
               << send_param_grad[1] << " place: " << op_dev_id;
783
      for (auto &varname : input_var_names) {
X
Xin Pan 已提交
784
        result->Get<ShardedVarDevice>(kShardedVarDevice)
X
Xin Pan 已提交
785
            .emplace(varname, op_dev_id);
Y
Yancey1989 已提交
786
      }
W
Wu Yi 已提交
787 788
      result->Get<ShardedVarDevice>(kShardedVarDevice)
          .emplace(send_param_grad[1], op_dev_id);
Y
Yancey1989 已提交
789
    }
790 791 792
  } else if (node->Op()->Type() == "recv") {
    std::vector<std::string> output_var_names;
    for (ir::Node *n : node->outputs) {
X
Xin Pan 已提交
793
      output_var_names.push_back(n->Name());
794
    }
W
Wu Yi 已提交
795 796 797 798 799 800 801 802 803 804
    auto recv_param_grad = boost::get<std::vector<std::string>>(
        node->Op()->GetAttr(OpProtoAndCheckerMaker::OpRoleVarAttrName()));
    if (recv_param_grad.size() == 2U) {
      op_dev_id = GetVarDeviceID(*result, recv_param_grad[1]);
      VLOG(10) << "recv param " << recv_param_grad[0]
               << " get grad place: " << recv_param_grad[1]
               << " place: " << op_dev_id;
    } else {
      op_dev_id = GetAppropriateDeviceID(output_var_names);
    }
805
    for (auto &varname : output_var_names) {
X
Xin Pan 已提交
806
      result->Get<ShardedVarDevice>(kShardedVarDevice)
X
Xin Pan 已提交
807
          .emplace(varname, op_dev_id);
Y
Yancey1989 已提交
808 809
    }
  } else {
W
Wu Yi 已提交
810
    // send_barrier, fetch_barrier will run on place 0;
Y
Yancey1989 已提交
811 812 813 814
    op_dev_id = 0;
  }

  PADDLE_ENFORCE(op_dev_id != -1, "can not find the right place for rpc op: %s",
815
                 node->Op()->Type());
X
Xin Pan 已提交
816
  result->Get<GraphOps>(kGraphOps).emplace_back(new RPCOpHandle(
817 818
      result->CreateOpNode(node->Op()), *node->Op(), local_scopes_[op_dev_id],
      node->Op()->Type(), places_[op_dev_id]));
Y
fix pe  
Yancey1989 已提交
819

W
Wu Yi 已提交
820 821
  if (node->Op()->Type() == "send") {
    CreateOpHandleIOs(result, node, op_dev_id);
Y
Yancey1989 已提交
822
  } else {
W
Wu Yi 已提交
823 824 825 826 827 828
    // send_barrier, recv, fetch_barrier's inputs are deps var, get them from
    // all places
    auto p = places_[op_dev_id];
    auto *op_handle = result->Get<GraphOps>(kGraphOps).back().get();
    op_handle->SetDeviceContext(p,
                                platform::DeviceContextPool::Instance().Get(p));
Y
Yancey1989 已提交
829

W
Wu Yi 已提交
830 831 832 833 834 835 836 837 838 839 840 841 842 843 844 845 846 847
    SetOpInputsAllPlaces(result, node, places_.size());
    for (ir::Node *output : node->outputs) {
      int outvar_dev_id = op_dev_id;
      if (node->Op()->Type() == "fetch_barrier") {
        outvar_dev_id = GetVarDeviceID(*result, output->Name());
        PADDLE_ENFORCE_NE(outvar_dev_id, -1);
      }
      p = places_[outvar_dev_id];
      ir::Node *new_node = nullptr;
      if (output->Var()) {
        new_node = result->CreateVarNode(output->Var());
      } else {
        new_node =
            result->CreateEmptyNode(output->Name(), ir::Node::Type::kVariable);
      }
      CreateOpOutput(result, op_handle, new_node, p, outvar_dev_id);
    }
  }
Y
Yancey1989 已提交
848
  return op_dev_id;
Y
Yu Yang 已提交
849 850
}

851
bool MultiDevSSAGraphBuilder::IsScaleLossOp(ir::Node *node) const {
Y
yuyang18 已提交
852
  return boost::get<int>(
853
             node->Op()->GetAttr(OpProtoAndCheckerMaker::OpRoleAttrName())) ==
Y
Fix bug  
yuyang18 已提交
854 855 856
             (static_cast<int>(OpRole::kBackward) |
              static_cast<int>(OpRole::kLoss)) &&
         !loss_var_name_.empty();  // If loss_var is empty. This is test mode
Y
Yu Yang 已提交
857
}
Y
Yu Yang 已提交
858 859 860
}  // namespace details
}  // namespace framework
}  // namespace paddle
X
Xin Pan 已提交
861

X
Xin Pan 已提交
862
REGISTER_PASS(multi_devices_pass,
X
Xin Pan 已提交
863 864 865 866 867 868
              paddle::framework::details::MultiDevSSAGraphBuilder)
    .RequirePassAttr(paddle::framework::details::kLossVarName)
    .RequirePassAttr(paddle::framework::details::kPlaces)
    .RequirePassAttr(paddle::framework::details::kParams)
    .RequirePassAttr(paddle::framework::details::kLocalScopes)
    .RequirePassAttr(paddle::framework::details::kStrategy);