multi_devices_graph_pass.cc
//   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <algorithm>
#include <fstream>
#include <string>
#include <utility>
#include <vector>

#include "paddle/fluid/framework/details/all_reduce_op_handle.h"
#include "paddle/fluid/framework/details/broadcast_op_handle.h"
#include "paddle/fluid/framework/details/computation_op_handle.h"
#include "paddle/fluid/framework/details/data_balance_op_handle.h"
#include "paddle/fluid/framework/details/fused_broadcast_op_handle.h"
#include "paddle/fluid/framework/details/multi_devices_graph_pass.h"
#include "paddle/fluid/framework/details/reduce_op_handle.h"
#include "paddle/fluid/framework/details/rpc_op_handle.h"
#include "paddle/fluid/framework/details/scale_loss_grad_op_handle.h"
#include "paddle/fluid/framework/ir/graph_helper.h"
#include "paddle/fluid/framework/ir/node.h"
#include "paddle/fluid/framework/op_info.h"
#include "paddle/fluid/framework/scope.h"

namespace paddle {
namespace framework {
namespace details {

namespace {
// TODO(panyx0718): Clean this up as well.
// All operators. NOTE that even though we use a vector here, the operators
// are unordered.
typedef std::vector<OpHandleBase *> GraphOps;
const char kGraphOps[] = "ops";

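// Insert control-dependency (dummy) vars between the ops that read an older
// version of a variable and the op that writes the next version, so that
// write-after-read hazards are serialized.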
void PolishGraphToSupportDataHazards(ir::Graph *graph) {
  for (auto &var_map : graph->Get<GraphVars>(kGraphVars)) {
    for (auto &name_pair : var_map) {
      if (name_pair.second.size() <= 1) {
        continue;
      }
      auto it_new = name_pair.second.rbegin();
      auto it_old = name_pair.second.rbegin();
      ++it_old;
      for (; it_old != name_pair.second.rend(); it_new = it_old, ++it_old) {
        OpHandleBase *write_op = (*it_new)->GeneratedOp();
        const auto &read_ops = (*it_old)->PendingOps();

        for (auto *read_op : read_ops) {
          // Manually add a dependency var from read_op to write_op;
          if (read_op == write_op) {
            // Read Write is the same op.
            continue;
          }
          bool has_dep = false;
          for (auto *r_out : read_op->Outputs()) {
            for (auto *w_in : write_op->Inputs()) {
              if (r_out->Node() == w_in->Node()) {
                has_dep = true;
                break;
              }
            }
          }
          if (has_dep) continue;

          auto *dep_var = new DummyVarHandle(graph->CreateControlDepVar());
          read_op->AddOutput(dep_var);
          write_op->AddInput(dep_var);
          graph->Get<GraphDepVars>(kGraphDepVars).emplace(dep_var);
        }
      }
    }
  }
}

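// Return the latest VarHandle of `node` on `place`; if the variable has no
// handle on that place yet, create version 0 for it.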
VarHandle *CreateOrGetLatestVarHandle(ir::Graph *graph, ir::Node *node,
                                      const platform::Place &place,
                                      size_t place_offset) {
  auto &var_holders = graph->Get<GraphVars>(kGraphVars)[place_offset];
  auto &var_holder = var_holders[node->Name()];
  VarHandle *var = nullptr;
  if (var_holder.empty()) {
    if (node->Var()) {
      var = new VarHandle(graph->CreateVarNode(node->Var()), 0, place_offset,
                          node->Name(), place);
    } else {
      var = new VarHandle(
          graph->CreateEmptyNode(node->Name(), ir::Node::Type::kVariable), 0,
          place_offset, node->Name(), place);
    }
    var_holder.emplace_back(var);
  } else {
    var = *var_holder.rbegin();
  }
  return var;
}

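// Append a new version of `new_node`'s variable on `place` and register it as
// an output of `op_handle`.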
void CreateOpOutput(ir::Graph *graph, OpHandleBase *op_handle,
                    ir::Node *new_node, const platform::Place &place,
                    size_t place_offset) {
  auto &vars =
      graph->Get<GraphVars>(kGraphVars)[place_offset][new_node->Name()];
  size_t version = vars.size();
  auto var =
      new VarHandle(new_node, version, place_offset, new_node->Name(), place);
  vars.emplace_back(var);
  op_handle->AddOutput(var);
}

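// Attach a dummy control-dependency var to every op that has no outputs, so
// that only variables appear as leaves of the graph.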
void AddOutputToLeafOps(ir::Graph *graph) {
  for (auto &op : graph->Get<GraphOps>(kGraphOps)) {
    if (!op->Outputs().empty()) {
      continue;
    }
    auto *dummy_leaf = new DummyVarHandle(graph->CreateControlDepVar());
    graph->Get<GraphDepVars>(kGraphDepVars).emplace(dummy_leaf);
    op->AddOutput(dummy_leaf);
  }
}
}  // namespace

static const char kLossVarName[] = "loss_var_name";
static const char kPlaces[] = "places";
static const char kParams[] = "params";
static const char kLocalScopes[] = "local_scopes";
static const char kStrategy[] = "strategy";
static const char kNumTrainers[] = "num_trainers";

void MultiDevSSAGraphBuilder::Init() const {
  all_vars_.clear();
  balance_vars_.clear();

  loss_var_name_ = Get<const std::string>(kLossVarName);
  places_ = Get<const std::vector<platform::Place>>(kPlaces);
  local_scopes_ = Get<const std::vector<Scope *>>(kLocalScopes);
  strategy_ = Get<const BuildStrategy>(kStrategy);
#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32)
  nccl_ctxs_ = &Get<platform::NCCLContextMap>("nccl_ctxs");
#endif

  for (auto &p : Get<const std::unordered_set<std::string>>(kParams)) {
    grad_names_.insert(GradVarName(p));
  }
  balance_vars_.resize(places_.size(), 0);
  if (strategy_.enable_data_balance_ && places_.size() == 1) {
    LOG(WARNING) << "There is no need to enable data balance when there is "
                    "only one place. enable_data_balance is set to False.";
    strategy_.enable_data_balance_ = false;
  }
}

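// Wire the inputs and outputs of `node` to the op handle most recently pushed
// onto kGraphOps, using the variable versions of place `place_id`.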
void MultiDevSSAGraphBuilder::CreateOpHandleIOs(ir::Graph *result,
                                                ir::Node *node,
                                                size_t place_id) const {
  auto p = places_[place_id];
  auto *op_handle = result->Get<GraphOps>(kGraphOps).back();
  op_handle->SetDeviceContext(p,
                              platform::DeviceContextPool::Instance().Get(p));

  for (ir::Node *input : node->inputs) {
    VarHandle *var = CreateOrGetLatestVarHandle(result, input, p, place_id);
    op_handle->AddInput(var);
  }

  for (ir::Node *output : node->outputs) {
    ir::Node *new_node = nullptr;
    if (output->Var()) {
      new_node = result->CreateVarNode(output->Var());
    } else {
      new_node =
          result->CreateEmptyNode(output->Name(), ir::Node::Type::kVariable);
    }
    CreateOpOutput(result, op_handle, new_node, p, place_id);
  }
}

std::vector<std::string> MultiDevSSAGraphBuilder::FindDistTrainSendVars(
    const std::vector<ir::Node *> &nodes) const {
  std::vector<std::string> send_vars;
  // since parameters are all in block 0,
  // it's enough to only scan send ops in block 0
  for (auto &node : nodes) {
    OpDesc *op = node->Op();
    // TODO(Yancey1989): use a graceful method to find the send op,
    // instead of the hard-coded string
    if (op->Type() == "send") {
      auto op_vars = op->InputArgumentNames();
      send_vars.reserve(send_vars.size() +
                        std::distance(op_vars.begin(), op_vars.end()));
      send_vars.insert(send_vars.end(), op_vars.begin(), op_vars.end());
    }
  }
  return send_vars;
}

std::vector<std::string> MultiDevSSAGraphBuilder::FindDistTrainRecvVars(
    const std::vector<ir::Node *> &nodes) const {
  std::vector<std::string> recv_vars;
  for (auto &node : nodes) {
    OpDesc *op = node->Op();
    // TODO(Yancey1989): use a graceful method to find the recv op,
    // instead of the hard-coded string
    if (op->Type() == "recv") {
      auto op_vars = op->OutputArgumentNames();
      recv_vars.reserve(recv_vars.size() +
                        std::distance(op_vars.begin(), op_vars.end()));
      recv_vars.insert(recv_vars.end(), op_vars.begin(), op_vars.end());
    }
  }
  return recv_vars;
}

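// Simple load balancing: pick the device with the smallest accumulated tensor
// size so far and charge it with the total numel of `var_names`.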
size_t MultiDevSSAGraphBuilder::GetAppropriateDeviceID(
    const std::vector<std::string> &var_names) const {
  int64_t numel_sum = 0;
  for (auto var_name : var_names) {
    if (all_vars_.find(var_name) == all_vars_.end()) continue;
    auto var_desc = all_vars_.at(var_name);
    PADDLE_ENFORCE_NOT_NULL(var_desc);
    auto dim = framework::make_ddim(var_desc->GetShape());
    int64_t numel = framework::product(dim);
    PADDLE_ENFORCE_GT(numel, 0);
    numel_sum += numel;
  }

  auto smallest =
      std::min_element(std::begin(balance_vars_), std::end(balance_vars_));
  size_t dev_id =
      static_cast<size_t>(std::distance(std::begin(balance_vars_), smallest));
  balance_vars_[dev_id] += numel_sum;
  return dev_id;
}

// Topologically sort the graph nodes from inputs to outputs.
// Since SSAGraphBuilder depends on forward/backward nodes to assign devices
// to parameters/gradients before optimizer ops, a plain topological sort is
// insufficient (some optimizer ops might not depend on any nodes), so we
// manually move all optimizer nodes after the last backward node.
// However, the assumption by SSAGraphBuilder should be relaxed in the future.
std::vector<ir::Node *> SortOpsAndDelayOptimizeOp(const ir::Graph &graph) {
  std::vector<ir::Node *> ret = ir::TopologySortOperations(graph);
  size_t last_backward = 0;
  for (size_t i = 0; i < ret.size(); ++i) {
    if (boost::get<int>(
            ret[i]->Op()->GetAttr(OpProtoAndCheckerMaker::OpRoleAttrName())) ==
        static_cast<int>(OpRole::kBackward)) {
      last_backward = i;
    }
  }

  std::vector<ir::Node *> optimize_ops;
  std::vector<ir::Node *> sorted_ret;
  for (size_t i = 0; i < ret.size(); ++i) {
    if (i < last_backward) {
      if (static_cast<bool>(boost::get<int>(ret[i]->Op()->GetAttr(
                                OpProtoAndCheckerMaker::OpRoleAttrName())) &
                            static_cast<int>(OpRole::kOptimize))) {
        optimize_ops.push_back(ret[i]);
      } else {
        sorted_ret.push_back(ret[i]);
      }
    } else if (i == last_backward) {
      sorted_ret.push_back(ret[i]);
      // Verify that no operations before optimize ops depends on optimize ops.
      std::unordered_set<ir::Node *> optimize_set(optimize_ops.begin(),
                                                  optimize_ops.end());
      for (ir::Node *n : sorted_ret) {
        for (ir::Node *in : n->inputs) {
          for (ir::Node *pre_n : in->inputs) {
            PADDLE_ENFORCE(optimize_set.find(pre_n) == optimize_set.end(),
                           "optimize operations cannot be depended on by "
                           "forward or backward node %s -> %s",
                           pre_n->Name(), n->Name());
          }
        }
      }
      sorted_ret.insert(sorted_ret.end(), optimize_ops.begin(),
                        optimize_ops.end());
    } else {
      sorted_ret.push_back(ret[i]);
    }
  }
  return sorted_ret;
}

std::unique_ptr<ir::Graph> MultiDevSSAGraphBuilder::ApplyImpl(
    std::unique_ptr<ir::Graph> graph) const {
  Init();
  // Give the topology sort order and rebuild the graph structure.
  std::vector<ir::Node *> sorted_ops = SortOpsAndDelayOptimizeOp(*graph);
  auto nodes = graph->ReleaseNodes();
  ir::Graph &result = *graph;

  // int num_trainers = Get<int>(kNumTrainers);

  for (auto &node : nodes) {
    if (node->IsVar() && node->Var()) {
      all_vars_.emplace(node->Name(), node->Var());
    }
  }
  std::unordered_set<std::string> og_has_been_broadcast;

  // We cannot invoke resize. It is a bug of GCC 4.8
  result.Set(kGraphVars, new GraphVars(places_.size()));
  result.Set(kGraphDepVars, new GraphDepVars);
  result.Set(kGraphOps, new GraphOps);

  // Find send/recv vars so that we can place the distributed training
  // related ops on place 0.
  auto send_vars = FindDistTrainSendVars(sorted_ops);
  auto recv_vars = FindDistTrainRecvVars(sorted_ops);

  std::vector<std::unordered_set<std::string>> bcast_var_name_set;
  bcast_var_name_set.resize(places_.size());

  size_t cur_device_id = 0;
  bool is_forwarding = true;
  bool is_dist_train = false;

  std::unordered_map<std::string, int> sharded_var_device;

  for (ir::Node *node : sorted_ops) {
    VLOG(5) << "op name: " << node->Op()->Type();
    if (boost::get<int>(
            node->Op()->GetAttr(OpProtoAndCheckerMaker::OpRoleAttrName())) ==
        static_cast<int>(OpRole::kRPC)) {
      int op_dev_id = CreateRPCOp(&result, node, &sharded_var_device);
      PADDLE_ENFORCE(op_dev_id != -1,
                     "Can not schedule the RPC operator to the right place.");
      if (node->Op()->Type() == "recv") {
        auto recv_vars_attr =
            boost::get<std::vector<std::string>>(node->Op()->GetNullableAttr(
                OpProtoAndCheckerMaker::OpRoleVarAttrName()));
        PADDLE_ENFORCE(recv_vars_attr.size() == 2UL);  // [parameter, gradient]
        if (recv_vars_attr[0].find(".block") == std::string::npos) {
          bcast_var_name_set[op_dev_id].emplace(recv_vars_attr[0]);
        }
      }
      is_dist_train = true;
    } else if (boost::get<int>(node->Op()->GetAttr(
                   OpProtoAndCheckerMaker::OpRoleAttrName())) ==
               static_cast<int>(OpRole::kDist)) {
      int op_dev_id = CreateDistTrainOp(&result, node, &sharded_var_device);
      if (node->Op()->Type() == "concat") {
        auto origin_param_name = node->Op()->OutputArgumentNames()[0];
        bcast_var_name_set[op_dev_id].emplace(origin_param_name);
      }
    } else if (IsScaleLossOp(node)) {
      // Users can customize loss@grad when use_default_grad_scale_ is not set.
      if (strategy_.gradient_scale_ !=
          BuildStrategy::GradientScaleStrategy::kCustomized) {
        // TODO(paddle-dev): Why is there no input for this op_handle?
        auto loss_grad_name = node->Op()->OutputArgumentNames()[0];
        CreateScaleLossGradOp(&result, loss_grad_name, node->outputs[0]);
      }
      // This assumes the backward-generating code will ensure IsScaleLossOp
      // is true only for the op that scales the final scalar loss.
      // It also assumes the backward op will always follow the forward op in
      // the block.
      VLOG(5) << "this is loss scale op!";
      is_forwarding = false;
    } else {
      int op_dev_id = GetOpDeviceID(result, node, sharded_var_device);
      VLOG(5) << "on device id: " << op_dev_id;
      if (op_dev_id != -1) {  // This op only runs on one specific device.
        CreateComputationalOp(&result, node, op_dev_id);
        for (ir::Node *n : node->outputs) {
          sharded_var_device.emplace(n->Name(), op_dev_id);
        }
      } else {
        // This op runs on all devices, and its outputs may include parameter
        // gradients.
        // TODO(paddle-dev): Why is the "read" op so special?
        if (node->Op()->Type() == "read" && strategy_.enable_data_balance_) {
          node->Op()->SetAttr("throw_eof_exp", false);
          CreateComputationalOps(&result, node, places_.size());
          const auto &data_var_names = node->Op()->Output("Out");
          InsertDataBalanceOp(&result, data_var_names);
        } else {
          CreateComputationalOps(&result, node, places_.size());
        }

        // if (!is_forwarding && (places_.size() > 1 || num_trainers > 1)) {
        if (!is_forwarding && nccl_ctxs_->contexts_.size() > 1) {
          // Currently, we assume that once a gradient is generated, it can be
          // broadcast, and each gradient is broadcast only once.
          if (static_cast<bool>(boost::get<int>(node->Op()->GetAttr(
                                    OpProtoAndCheckerMaker::OpRoleAttrName())) &
                                static_cast<int>(OpRole::kBackward))) {
            try {
              auto backward_vars = boost::get<std::vector<std::string>>(
                  node->Op()->GetNullableAttr(
                      OpProtoAndCheckerMaker::OpRoleVarAttrName()));

              PADDLE_ENFORCE_EQ(backward_vars.size() % 2, 0);

              for (size_t i = 0; i < backward_vars.size(); i += 2) {
                auto &p_name = backward_vars[i];
                auto &g_name = backward_vars[i + 1];
                VLOG(10) << "Bcast " << g_name << " for parameter " << p_name;

                switch (strategy_.reduce_) {
                  case BuildStrategy::ReduceStrategy::kReduce:
                    cur_device_id = GetAppropriateDeviceID({g_name});
                    CreateReduceOp(&result, g_name, cur_device_id);
                    sharded_var_device.emplace(g_name, cur_device_id);
                    if (!is_dist_train) {
                      bcast_var_name_set[cur_device_id].emplace(p_name);
                    }
                    break;
                  case BuildStrategy::ReduceStrategy::kAllReduce:
                    if (IsSparseGradient(g_name)) {
                      CreateReduceOp(&result, g_name, 0);
                      CreateBroadcastOp(&result, g_name, 0);
                    } else {
                      InsertAllReduceOp(&result, g_name);
                    }
                    break;
                  default:
                    LOG(FATAL) << "Unknown reduce strategy ";
                    break;
                }
              }
            } catch (const boost::bad_get &e) {
            }
          }
        }
      }
    }
  }
  bool use_gpu = false;
#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32)
  use_gpu = nccl_ctxs_ != nullptr;
#endif

  // Principles for inserting broadcast operators:
  // 1. Broadcast optimized parameters in the Reduce strategy;
  // 2. No need to broadcast optimized parameters in the AllReduce strategy,
  //    because the optimization sub-graph is run on every GPU;
  // 3. Always broadcast received parameters in distributed training.
  if ((use_gpu &&
       strategy_.reduce_ == BuildStrategy::ReduceStrategy::kReduce) ||
      is_dist_train) {
    if (strategy_.fuse_broadcast_op_) {
      CreateFusedBroadcastOp(&result, bcast_var_name_set);
    } else {
      for (size_t dev_id = 0; dev_id < bcast_var_name_set.size(); ++dev_id) {
        auto &to_bcast_set = bcast_var_name_set[dev_id];
        for (auto &bcast_name : to_bcast_set) {
          CreateBroadcastOp(&result, bcast_name, dev_id);
        }
      }
    }
  }
  /*
  The dependency graph has been constructed. However, there are still data
  hazards that need to be handled.
  */
  PolishGraphToSupportDataHazards(&result);

  /*
   * Only variables should be the leaves of the graph.
   */
  AddOutputToLeafOps(&result);
  result.Erase<GraphOps>(kGraphOps);
  return graph;
}

bool MultiDevSSAGraphBuilder::IsSparseGradient(const std::string &og) const {
  PADDLE_ENFORCE(all_vars_.count(og) != 0);
  if (all_vars_.at(og)->GetType() == proto::VarType::SELECTED_ROWS) {
    return true;
  }
  return false;
}

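// Assign the default device context of place `p` to `op_handle`. On CUDA
// builds this is skipped when NCCL contexts are available, since the
// collective op handles use those contexts instead.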
void MultiDevSSAGraphBuilder::SetCommunicationContext(
    OpHandleBase *op_handle, const platform::Place &p) const {
#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32)
  if (nccl_ctxs_ == nullptr) {
    op_handle->SetDeviceContext(p,
                                platform::DeviceContextPool::Instance().Get(p));
  }
#else
  op_handle->SetDeviceContext(p,
                              platform::DeviceContextPool::Instance().Get(p));
#endif
}

void MultiDevSSAGraphBuilder::CreateBroadcastOp(ir::Graph *result,
                                                const std::string &p_name,
                                                size_t src_dev_id) const {
#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32)
  auto *op_handle = new BroadcastOpHandle(
      result->CreateEmptyNode("broadcast", ir::Node::Type::kOperation),
      local_scopes_, places_, nccl_ctxs_);
#else
  auto *op_handle = new BroadcastOpHandle(
      result->CreateEmptyNode("broadcast", ir::Node::Type::kOperation),
      local_scopes_, places_);
#endif
  result->Get<GraphOps>(kGraphOps).emplace_back(op_handle);

  auto *in =
      result->Get<GraphVars>(kGraphVars).at(src_dev_id).at(p_name).back();
  op_handle->AddInput(in);

  for (size_t i = 0; i < places_.size(); ++i) {
    auto &p = places_[i];
    SetCommunicationContext(op_handle, p);
    auto &vars = result->Get<GraphVars>(kGraphVars).at(i).at(p_name);
    auto *out_var = new VarHandle(
        result->CreateEmptyNode(p_name, ir::Node::Type::kVariable), vars.size(),
        i, p_name, p);
    vars.emplace_back(out_var);
    op_handle->AddOutput(out_var);
  }
}

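// Broadcast all variables in `bcast_varnames` through a single fused op
// handle instead of one BroadcastOpHandle per variable.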
void MultiDevSSAGraphBuilder::CreateFusedBroadcastOp(
    ir::Graph *result,
    const std::vector<std::unordered_set<std::string>> &bcast_varnames) const {
#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32)
  auto *op_handle = new FusedBroadcastOpHandle(
      result->CreateEmptyNode("fused_broadcast", ir::Node::Type::kOperation),
      local_scopes_, places_, nccl_ctxs_);
#else
  auto *op_handle = new FusedBroadcastOpHandle(
      result->CreateEmptyNode("fused_broadcast", ir::Node::Type::kOperation),
      local_scopes_, places_);
#endif
  result->Get<GraphOps>(kGraphOps).emplace_back(op_handle);

  for (size_t i = 0; i < places_.size(); ++i) {
    auto &p = places_[i];
    SetCommunicationContext(op_handle, p);
  }

  for (size_t dev_id = 0; dev_id < bcast_varnames.size(); ++dev_id) {
    for (auto &p_name : bcast_varnames[dev_id]) {
      auto *in =
          result->Get<GraphVars>(kGraphVars).at(dev_id).at(p_name).back();
      op_handle->AddInput(in);
      for (size_t out_dev_id = 0; out_dev_id < places_.size(); ++out_dev_id) {
        auto &p = places_[out_dev_id];
        auto &vars =
            result->Get<GraphVars>(kGraphVars).at(out_dev_id).at(p_name);
        auto *out_var = new VarHandle(
            result->CreateEmptyNode(p_name, ir::Node::Type::kVariable),
            vars.size(), out_dev_id, p_name, p);
        vars.emplace_back(out_var);
        op_handle->AddOutput(out_var);
      }
    }
  }
}

void MultiDevSSAGraphBuilder::CreateComputationalOp(ir::Graph *result,
                                                    ir::Node *node,
                                                    int dev_id) const {
  result->Get<GraphOps>(kGraphOps).emplace_back(
      new ComputationOpHandle(result->CreateOpNode(node->Op()),
                              local_scopes_[dev_id], places_[dev_id]));
  CreateOpHandleIOs(result, node, dev_id);
}

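// Insert an all-reduce over gradient `og`: the latest version on every place
// is an input, and a new version on every place is an output.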
void MultiDevSSAGraphBuilder::InsertAllReduceOp(ir::Graph *result,
                                                const std::string &og) const {
#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32)
  result->Get<GraphOps>(kGraphOps).emplace_back(new AllReduceOpHandle(
      result->CreateEmptyNode("allreduce", ir::Node::Type::kOperation),
      local_scopes_, places_, nccl_ctxs_));
#else
  result->Get<GraphOps>(kGraphOps).emplace_back(new AllReduceOpHandle(
      result->CreateEmptyNode("allreduce", ir::Node::Type::kOperation),
      local_scopes_, places_));
#endif
  auto *op_handle = result->Get<GraphOps>(kGraphOps).back();

  for (size_t i = 0; i < places_.size(); ++i) {
    auto &p = places_[i];
    SetCommunicationContext(op_handle, p);
    auto &vars = result->Get<GraphVars>(kGraphVars)[i][og];
    PADDLE_ENFORCE(!vars.empty());
    auto &prev_grad = vars.back();
    op_handle->AddInput(prev_grad);

    auto var =
        new VarHandle(result->CreateEmptyNode(og, ir::Node::Type::kVariable),
                      vars.size(), i, og, p);
    vars.emplace_back(var);
    op_handle->AddOutput(var);
  }
}

void MultiDevSSAGraphBuilder::InsertDataBalanceOp(
    ir::Graph *result, const std::vector<std::string> &datas) const {
#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32)
  result->Get<GraphOps>(kGraphOps).emplace_back(new DataBalanceOpHandle(
      result->CreateEmptyNode("data_balance", ir::Node::Type::kOperation),
      local_scopes_, places_, nccl_ctxs_));
#else
  result->Get<GraphOps>(kGraphOps).emplace_back(new DataBalanceOpHandle(
      result->CreateEmptyNode("data_balance", ir::Node::Type::kOperation),
      local_scopes_, places_));
#endif
  auto *op_handle = result->Get<GraphOps>(kGraphOps).back();
  for (size_t i = 0; i < places_.size(); ++i) {
    auto &p = places_[i];
    SetCommunicationContext(op_handle, p);
    for (const std::string &d_name : datas) {
      auto &vars = result->Get<GraphVars>(kGraphVars)[i][d_name];
      PADDLE_ENFORCE(!vars.empty());
      op_handle->AddInput(vars.back());
      auto var = new VarHandle(
          result->CreateEmptyNode(d_name, ir::Node::Type::kVariable),
          vars.size(), i, d_name, p);
      vars.emplace_back(var);
      op_handle->AddOutput(var);
    }
  }
}

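// Under the kReduce strategy, return the device that owns the gradient
// consumed by an optimize op (looked up in sharded_var_device); otherwise
// return -1, meaning the op runs on every device.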
int MultiDevSSAGraphBuilder::GetOpDeviceID(
    const ir::Graph &graph, ir::Node *node,
    const std::unordered_map<std::string, int> &sharded_var_device) const {
  if (strategy_.reduce_ != BuildStrategy::ReduceStrategy::kReduce) {
    return -1;
  }
  int op_role = boost::get<int>(
      node->Op()->GetAttr(framework::OpProtoAndCheckerMaker::OpRoleAttrName()));
  if (op_role != static_cast<int>(framework::OpRole::kOptimize)) {
    return -1;
  }
  auto param_grad = boost::get<std::vector<std::string>>(
      node->Op()->GetAttr(OpProtoAndCheckerMaker::OpRoleVarAttrName()));

  PADDLE_ENFORCE_EQ(param_grad.size(), 2U);
  int dev_id = GetVarDeviceID(graph, param_grad[1], sharded_var_device);
  PADDLE_ENFORCE_NE(dev_id, -1, "dev_id should not be -1.[%s, %s, %s]",
                    node->Op()->Type(), param_grad[0], param_grad[1]);
  return dev_id;
}

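// Look up the device a sharded variable was assigned to; if the name contains
// kNewGradSuffix, fall back to the prefix before that suffix. Returns -1 when
// the variable is not sharded.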
int MultiDevSSAGraphBuilder::GetVarDeviceID(
    const ir::Graph &graph, const std::string &varname,
    const std::unordered_map<std::string, int> &sharded_var_device) const {
  auto got = sharded_var_device.find(varname);
  if (got == sharded_var_device.end()) {
    auto pos = varname.find(framework::kNewGradSuffix);
    if (pos != std::string::npos) {
      got = sharded_var_device.find(varname.substr(0, pos));
    }
  }
  return got == sharded_var_device.end() ? -1 : got->second;
}

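// Create one ScaleLossGradOpHandle per place to produce the initial loss
// gradient; the scale factor is derived from the number of devices (see the
// FIXME below).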
void MultiDevSSAGraphBuilder::CreateScaleLossGradOp(
    ir::Graph *result, const std::string &loss_grad_name,
    ir::Node *out_var_node) const {
  for (size_t i = 0; i < places_.size(); ++i) {
    // Insert ScaleCost OpHandle
    auto *dev_ctx = platform::DeviceContextPool::Instance().Get(places_[i]);
    auto *op_handle = new ScaleLossGradOpHandle(
        result->CreateEmptyNode("scale_loss_grad", ir::Node::Type::kOperation),
        local_scopes_.size(), local_scopes_[i], places_[i], dev_ctx);
    result->Get<GraphOps>(kGraphOps).emplace_back(op_handle);

    // FIXME: Currently ScaleLossGradOp only uses device_count as the scale
    // factor, so it does not depend on any other operators.
    // VarHandle *loss = GetVarHandle(loss_var_name, place);
    // loss->pending_ops_.emplace_back(op_handle);
    // op_handle->inputs_.emplace_back(loss);

    CreateOpOutput(result, op_handle,
                   result->CreateVarNode(out_var_node->Var()), places_[i], i);
  }
}

void MultiDevSSAGraphBuilder::CreateComputationalOps(ir::Graph *result,
                                                     ir::Node *node,
                                                     size_t num_places) const {
  for (size_t scope_idx = 0; scope_idx < num_places; ++scope_idx) {
    auto p = places_[scope_idx];
    auto s = local_scopes_[scope_idx];
    result->Get<GraphOps>(kGraphOps).emplace_back(
        new ComputationOpHandle(result->CreateOpNode(node->Op()), s, p));
    CreateOpHandleIOs(result, node, scope_idx);
  }
}

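// Reduce gradient `og` from all places onto `dst_dev_id` and return the newly
// created output VarHandle on that device.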
VarHandle *MultiDevSSAGraphBuilder::CreateReduceOp(ir::Graph *result,
                                                   const std::string &og,
                                                   int dst_dev_id) const {
#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32)
  result->Get<GraphOps>(kGraphOps).emplace_back(new ReduceOpHandle(
      result->CreateEmptyNode("reduce", ir::Node::Type::kOperation),
      local_scopes_, places_, nccl_ctxs_));
#else
  result->Get<GraphOps>(kGraphOps).emplace_back(new ReduceOpHandle(
      result->CreateEmptyNode("reduce", ir::Node::Type::kOperation),
      local_scopes_, places_));
#endif
  auto *op_handle = result->Get<GraphOps>(kGraphOps).back();

  for (size_t i = 0; i < places_.size(); ++i) {
    auto &p = places_[i];
    SetCommunicationContext(op_handle, p);
    auto &vars = result->Get<GraphVars>(kGraphVars)[i][og];
    PADDLE_ENFORCE(!vars.empty());
    auto &prev_grad = vars.back();
    op_handle->AddInput(prev_grad);
  }
  auto &vars = result->Get<GraphVars>(kGraphVars)[dst_dev_id][og];
  auto var =
      new VarHandle(result->CreateEmptyNode(og, ir::Node::Type::kVariable),
                    vars.size(), dst_dev_id, og, places_[dst_dev_id]);
  vars.emplace_back(var);
  op_handle->AddOutput(var);
  return var;
}

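// Place a distributed-training helper op (split_byref, split_selected_rows,
// split_ids, or concat) on a single device, record the device of its output
// vars in sharded_var_device, and create the computational op there.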
int MultiDevSSAGraphBuilder::CreateDistTrainOp(
    ir::Graph *result, ir::Node *node,
    std::unordered_map<std::string, int> *sharded_var_device) const {
  int op_dev_id = -1;
  std::vector<std::string> input_var_names;
  std::vector<std::string> output_var_names;
  for (ir::Node *input : node->inputs) {
    input_var_names.push_back(input->Name());
  }
  for (ir::Node *output : node->outputs) {
    output_var_names.push_back(output->Name());
  }

  if (node->Op()->Type() == "split_byref" ||
      node->Op()->Type() == "split_selected_rows" ||
      node->Op()->Type() == "split_ids") {
    // TODO(paddle-dev): getting the first var is not safe.
    op_dev_id =
        GetVarDeviceID(*result, input_var_names[0], *sharded_var_device);
    if (strategy_.reduce_ == BuildStrategy::ReduceStrategy::kAllReduce) {
      op_dev_id = GetAppropriateDeviceID(input_var_names);
      for (auto &varname : input_var_names) {
        sharded_var_device->emplace(varname, op_dev_id);
      }
    }
    for (auto &varname : output_var_names) {
      sharded_var_device->emplace(varname, op_dev_id);
    }
  } else if (node->Op()->Type() == "concat") {
    op_dev_id =
        GetVarDeviceID(*result, input_var_names[0], *sharded_var_device);
    for (auto &varname : output_var_names) {
      sharded_var_device->emplace(varname, op_dev_id);
    }
  } else {
    LOG(ERROR) << "got unexpected dist op: " << node->Op()->Type();
    PADDLE_THROW(
        "the distributed training related op should be in [split_byref, "
        "concat].");
  }

  PADDLE_ENFORCE(op_dev_id != -1,
                 "cannot find the right place for distributed op: %s",
                 node->Op()->Type());

  CreateComputationalOp(result, node, op_dev_id);
  return op_dev_id;
}

void SetOpInputsAllPlaces(ir::Graph *result, ir::Node *node, int num_places) {
  auto *op_handle = result->Get<GraphOps>(kGraphOps).back();
  for (ir::Node *input : node->inputs) {
    VarHandle *var = nullptr;
    for (int place_offset = 0; place_offset < num_places; ++place_offset) {
      auto &var_holders = result->Get<GraphVars>(kGraphVars)[place_offset];
      auto &var_holder = var_holders[input->Name()];
      if (!var_holder.empty()) {
        var = *var_holder.rbegin();
        op_handle->AddInput(var);
      }
    }
  }
}

// Create RPC-related op handles that connect their input and output ops.
int MultiDevSSAGraphBuilder::CreateRPCOp(
    ir::Graph *result, ir::Node *node,
    std::unordered_map<std::string, int> *sharded_var_device) const {
  int op_dev_id = -1;
  if (node->Op()->Type() == "send") {
    // TODO(paddle-dev): getting the first var is not safe.
    op_dev_id =
        GetVarDeviceID(*result, node->inputs[0]->Name(), *sharded_var_device);
    PADDLE_ENFORCE(!ir::IsControlDepVar(*node->inputs[0]),
                   "This hack no longer holds, please fix.");
    // A variable name that contains ".block" means the variable was split by
    // the split_byref op.
    if (strategy_.reduce_ == BuildStrategy::ReduceStrategy::kAllReduce &&
        node->inputs[0]->Name().find(".block") == std::string::npos) {
      std::vector<std::string> input_var_names;
      for (ir::Node *n : node->inputs) {
        input_var_names.push_back(n->Name());
      }
      auto send_param_grad = boost::get<std::vector<std::string>>(
          node->Op()->GetAttr(OpProtoAndCheckerMaker::OpRoleVarAttrName()));
      PADDLE_ENFORCE_EQ(send_param_grad.size(), 2U);
      op_dev_id = GetAppropriateDeviceID({send_param_grad[1]});
      VLOG(10) << "send grad " << input_var_names[0] << " origin "
               << send_param_grad[1] << " place: " << op_dev_id;
      for (auto &varname : input_var_names) {
        sharded_var_device->emplace(varname, op_dev_id);
      }
      sharded_var_device->emplace(send_param_grad[1], op_dev_id);
    }
  } else if (node->Op()->Type() == "recv") {
    std::vector<std::string> output_var_names;
    for (ir::Node *n : node->outputs) {
      output_var_names.push_back(n->Name());
    }
    auto recv_param_grad = boost::get<std::vector<std::string>>(
        node->Op()->GetAttr(OpProtoAndCheckerMaker::OpRoleVarAttrName()));
    if (recv_param_grad.size() == 2U) {
      op_dev_id =
          GetVarDeviceID(*result, recv_param_grad[1], *sharded_var_device);
      VLOG(10) << "recv param " << recv_param_grad[0]
               << " get grad place: " << recv_param_grad[1]
               << " place: " << op_dev_id;
    } else {
      op_dev_id = GetAppropriateDeviceID(output_var_names);
    }
    for (auto &varname : output_var_names) {
      sharded_var_device->emplace(varname, op_dev_id);
    }
  } else {
    // send_barrier, fetch_barrier will run on place 0;
    op_dev_id = 0;
  }

  PADDLE_ENFORCE(op_dev_id != -1, "can not find the right place for rpc op: %s",
                 node->Op()->Type());
  result->Get<GraphOps>(kGraphOps).emplace_back(new RPCOpHandle(
      result->CreateOpNode(node->Op()), *node->Op(), local_scopes_[op_dev_id],
      node->Op()->Type(), places_[op_dev_id]));

  if (node->Op()->Type() == "send") {
    CreateOpHandleIOs(result, node, op_dev_id);
  } else {
    // The inputs of send_barrier, recv, and fetch_barrier are dependency
    // vars; gather them from all places.
    auto p = places_[op_dev_id];
    auto *op_handle = result->Get<GraphOps>(kGraphOps).back();
    op_handle->SetDeviceContext(p,
                                platform::DeviceContextPool::Instance().Get(p));

    SetOpInputsAllPlaces(result, node, places_.size());
    for (ir::Node *output : node->outputs) {
      int outvar_dev_id = op_dev_id;
      if (node->Op()->Type() == "fetch_barrier") {
        outvar_dev_id =
            GetVarDeviceID(*result, output->Name(), *sharded_var_device);
        PADDLE_ENFORCE_NE(outvar_dev_id, -1, "output name %s", output->Name());
      }
      p = places_[outvar_dev_id];
      ir::Node *new_node = nullptr;
      if (output->Var()) {
        new_node = result->CreateVarNode(output->Var());
      } else {
        new_node =
            result->CreateEmptyNode(output->Name(), ir::Node::Type::kVariable);
      }
      CreateOpOutput(result, op_handle, new_node, p, outvar_dev_id);
    }
  }
  return op_dev_id;
}

bool MultiDevSSAGraphBuilder::IsScaleLossOp(ir::Node *node) const {
  return boost::get<int>(
             node->Op()->GetAttr(OpProtoAndCheckerMaker::OpRoleAttrName())) ==
             (static_cast<int>(OpRole::kBackward) |
              static_cast<int>(OpRole::kLoss)) &&
         !loss_var_name_.empty();  // If loss_var is empty, this is test mode.
}
}  // namespace details
}  // namespace framework
}  // namespace paddle

REGISTER_PASS(multi_devices_pass,
              paddle::framework::details::MultiDevSSAGraphBuilder)
    .RequirePassAttr(paddle::framework::details::kLossVarName)
    .RequirePassAttr(paddle::framework::details::kPlaces)
    .RequirePassAttr(paddle::framework::details::kParams)
    .RequirePassAttr(paddle::framework::details::kLocalScopes)
    .RequirePassAttr(paddle::framework::details::kStrategy)
    .RequirePassAttr(paddle::framework::details::kNumTrainers);