//   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <algorithm>
#include <fstream>
#include <string>
#include <utility>
#include <vector>

#include "paddle/fluid/framework/details/all_reduce_op_handle.h"
#include "paddle/fluid/framework/details/broadcast_op_handle.h"
#include "paddle/fluid/framework/details/computation_op_handle.h"
#include "paddle/fluid/framework/details/data_balance_op_handle.h"
#include "paddle/fluid/framework/details/fused_broadcast_op_handle.h"
#include "paddle/fluid/framework/details/multi_devices_graph_pass.h"
#include "paddle/fluid/framework/details/reduce_op_handle.h"
#include "paddle/fluid/framework/details/rpc_op_handle.h"
#include "paddle/fluid/framework/details/scale_loss_grad_op_handle.h"
#include "paddle/fluid/framework/ir/graph_helper.h"
#include "paddle/fluid/framework/ir/node.h"
#include "paddle/fluid/framework/op_info.h"
#include "paddle/fluid/framework/scope.h"

namespace paddle {
namespace framework {
namespace details {

namespace {
// TODO(panyx0718): Clean this up as well.
// All operators. NOTE that even though we use a vector here, the operators
// are unordered.
typedef std::vector<OpHandleBase *> GraphOps;
const char kGraphOps[] = "ops";

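// Adds explicit anti-dependencies to avoid data hazards: for every variable
// that has more than one version, each op reading an older version is linked
// to the op that writes the next version through a dummy control-dependency
// variable, unless a direct data dependency between them already exists.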
void PolishGraphToSupportDataHazards(ir::Graph *graph) {
  for (auto &var_map : graph->Get<GraphVars>(kGraphVars)) {
    for (auto &name_pair : var_map) {
      if (name_pair.second.size() <= 1) {
        continue;
      }
      auto it_new = name_pair.second.rbegin();
      auto it_old = name_pair.second.rbegin();
      ++it_old;
      for (; it_old != name_pair.second.rend(); it_new = it_old, ++it_old) {
        OpHandleBase *write_op = (*it_new)->GeneratedOp();
        const auto &read_ops = (*it_old)->PendingOps();

        for (auto *read_op : read_ops) {
          // Manually add a dependency var from read_op to write_op;
          if (read_op == write_op) {
            // The read op and the write op are the same op.
            continue;
          }
          bool has_dep = false;
          for (auto *r_out : read_op->Outputs()) {
            for (auto *w_in : write_op->Inputs()) {
              if (r_out->Node() == w_in->Node()) {
                has_dep = true;
                break;
              }
            }
          }
          if (has_dep) continue;

          auto *dep_var = new DummyVarHandle(graph->CreateControlDepVar());
          read_op->AddOutput(dep_var);
          write_op->AddInput(dep_var);
          graph->Get<GraphDepVars>(kGraphDepVars).emplace(dep_var);
        }
      }
    }
  }
}

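// Returns the latest VarHandle of `node`'s variable on `place`, creating
// version 0 of the variable first if no handle exists for that place yet.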
VarHandle *CreateOrGetLatestVarHandle(ir::Graph *graph, ir::Node *node,
                                      const platform::Place &place,
                                      size_t place_offset) {
  auto &var_holders = graph->Get<GraphVars>(kGraphVars)[place_offset];
  auto &var_holder = var_holders[node->Name()];
  VarHandle *var = nullptr;
  if (var_holder.empty()) {
    if (node->Var()) {
      var = new VarHandle(graph->CreateVarNode(node->Var()), 0, place_offset,
                          node->Name(), place);
    } else {
      var = new VarHandle(
          graph->CreateEmptyNode(node->Name(), ir::Node::Type::kVariable), 0,
          place_offset, node->Name(), place);
    }
    var_holder.emplace_back(var);
  } else {
    var = *var_holder.rbegin();
  }
  return var;
}

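// Creates the next version of `new_node`'s variable on `place` and registers
// it as an output of `op_handle`.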
void CreateOpOutput(ir::Graph *graph, OpHandleBase *op_handle,
                    ir::Node *new_node, const platform::Place &place,
                    size_t place_offset) {
  auto &vars =
      graph->Get<GraphVars>(kGraphVars)[place_offset][new_node->Name()];
  size_t version = vars.size();
  auto var =
      new VarHandle(new_node, version, place_offset, new_node->Name(), place);
  vars.emplace_back(var);
  op_handle->AddOutput(var);
}

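// Gives every op without outputs a dummy control-dependency output so that
// only variables appear as leaves of the graph.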
void AddOutputToLeafOps(ir::Graph *graph) {
  for (auto &op : graph->Get<GraphOps>(kGraphOps)) {
    if (!op->Outputs().empty()) {
      continue;
    }
    auto *dummy_leaf = new DummyVarHandle(graph->CreateControlDepVar());
    graph->Get<GraphDepVars>(kGraphDepVars).emplace(dummy_leaf);
    op->AddOutput(dummy_leaf);
  }
}
}  // namespace

static const char kLossVarName[] = "loss_var_name";
static const char kPlaces[] = "places";
static const char kParams[] = "params";
static const char kLocalScopes[] = "local_scopes";
static const char kStrategy[] = "strategy";

void MultiDevSSAGraphBuilder::Init() const {
  all_vars_.clear();
  balance_vars_.clear();

  loss_var_name_ = Get<const std::string>(kLossVarName);
  places_ = Get<const std::vector<platform::Place>>(kPlaces);
  local_scopes_ = Get<const std::vector<Scope *>>(kLocalScopes);
  strategy_ = Get<const BuildStrategy>(kStrategy);
#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32)
  nccl_ctxs_ = &Get<platform::NCCLContextMap>("nccl_ctxs");
#endif

  for (auto &p : Get<const std::unordered_set<std::string>>(kParams)) {
    grad_names_.insert(GradVarName(p));
  }
  balance_vars_.resize(places_.size(), 0);
  if (strategy_.enable_data_balance_ && places_.size() == 1) {
    LOG(WARNING) << "There is no need to enable data balance when there is "
                    "only one place. enable_data_balance is set to False.";
    strategy_.enable_data_balance_ = false;
  }
}

void MultiDevSSAGraphBuilder::CreateOpHandleIOs(ir::Graph *result,
                                                ir::Node *node,
                                                size_t place_id) const {
  auto p = places_[place_id];
  auto *op_handle = result->Get<GraphOps>(kGraphOps).back();
  op_handle->SetDeviceContext(p,
                              platform::DeviceContextPool::Instance().Get(p));

  for (ir::Node *input : node->inputs) {
    VarHandle *var = CreateOrGetLatestVarHandle(result, input, p, place_id);
    op_handle->AddInput(var);
  }

  for (ir::Node *output : node->outputs) {
    ir::Node *new_node = nullptr;
    if (output->Var()) {
      new_node = result->CreateVarNode(output->Var());
    } else {
      new_node =
          result->CreateEmptyNode(output->Name(), ir::Node::Type::kVariable);
    }
    CreateOpOutput(result, op_handle, new_node, p, place_id);
  }
}

std::vector<std::string> MultiDevSSAGraphBuilder::FindDistTrainSendVars(
    const std::vector<ir::Node *> &nodes) const {
  std::vector<std::string> send_vars;
  // since parameters are all in block 0,
  // it's enough to only scan send ops in block 0
  for (auto &node : nodes) {
    OpDesc *op = node->Op();
    // TODO(Yancey1989): use a graceful method to find send op,
    // instead of the hard-coded string
    if (op->Type() == "send") {
      auto op_vars = op->InputArgumentNames();
      send_vars.reserve(send_vars.size() +
                        std::distance(op_vars.begin(), op_vars.end()));
      send_vars.insert(send_vars.end(), op_vars.begin(), op_vars.end());
    }
  }
  return send_vars;
}

std::vector<std::string> MultiDevSSAGraphBuilder::FindDistTrainRecvVars(
    const std::vector<ir::Node *> &nodes) const {
  std::vector<std::string> recv_vars;
  for (auto &node : nodes) {
    OpDesc *op = node->Op();
    // TODO(Yancey1989): use a graceful method to find recv op,
    // instead of the hard-coded string
    if (op->Type() == "recv") {
      auto op_vars = op->OutputArgumentNames();
      recv_vars.reserve(recv_vars.size() +
                        std::distance(op_vars.begin(), op_vars.end()));
      recv_vars.insert(recv_vars.end(), op_vars.begin(), op_vars.end());
    }
  }
  return recv_vars;
}

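// Greedy load balancing: sums the element counts of the given variables and
// assigns them to the device with the smallest accumulated size so far.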
size_t MultiDevSSAGraphBuilder::GetAppropriateDeviceID(
    const std::vector<std::string> &var_names) const {
  int64_t numel_sum = 0;
  for (auto var_name : var_names) {
    if (all_vars_.find(var_name) == all_vars_.end()) continue;
    auto var_desc = all_vars_.at(var_name);
    PADDLE_ENFORCE_NOT_NULL(var_desc);
    auto dim = framework::make_ddim(var_desc->GetShape());
    int64_t numel = framework::product(dim);
    PADDLE_ENFORCE_GT(numel, 0);
    numel_sum += numel;
  }

  auto smallest =
      std::min_element(std::begin(balance_vars_), std::end(balance_vars_));
  size_t dev_id =
      static_cast<size_t>(std::distance(std::begin(balance_vars_), smallest));
  balance_vars_[dev_id] += numel_sum;
  return dev_id;
}

// Topologically sort the graph nodes from inputs to outputs.
// Since SSAGraphBuilder depends on forward/backward nodes to assign devices
// to parameters/gradients before optimizer ops, topological sort alone is
// insufficient (some optimizer ops might not depend on any nodes), so we
// manually move all optimizer nodes after the last backward node.
// However, this assumption made by SSAGraphBuilder should be relaxed in the
// future.
std::vector<ir::Node *> SortOpsAndDelayOptimizeOp(const ir::Graph &graph) {
  std::vector<ir::Node *> ret = ir::TopologySortOperations(graph);
  size_t last_backward = 0;
  for (size_t i = 0; i < ret.size(); ++i) {
    if (boost::get<int>(
            ret[i]->Op()->GetAttr(OpProtoAndCheckerMaker::OpRoleAttrName())) ==
        static_cast<int>(OpRole::kBackward)) {
      last_backward = i;
    }
  }

  std::vector<ir::Node *> optimize_ops;
  std::vector<ir::Node *> sorted_ret;
  for (size_t i = 0; i < ret.size(); ++i) {
    if (i < last_backward) {
      if (static_cast<bool>(boost::get<int>(ret[i]->Op()->GetAttr(
                                OpProtoAndCheckerMaker::OpRoleAttrName())) &
                            static_cast<int>(OpRole::kOptimize))) {
        optimize_ops.push_back(ret[i]);
      } else {
        sorted_ret.push_back(ret[i]);
      }
    } else if (i == last_backward) {
      sorted_ret.push_back(ret[i]);
      // Verify that no operations before optimize ops depend on optimize ops.
      std::unordered_set<ir::Node *> optimize_set(optimize_ops.begin(),
                                                  optimize_ops.end());
      for (ir::Node *n : sorted_ret) {
        for (ir::Node *in : n->inputs) {
          for (ir::Node *pre_n : in->inputs) {
            PADDLE_ENFORCE(optimize_set.find(pre_n) == optimize_set.end(),
                           "optimize operations cannot be depended on by "
                           "forward or backward node %s -> %s",
                           pre_n->Name(), n->Name());
          }
        }
      }
      sorted_ret.insert(sorted_ret.end(), optimize_ops.begin(),
                        optimize_ops.end());
    } else {
      sorted_ret.push_back(ret[i]);
    }
  }
  return sorted_ret;
}

std::unique_ptr<ir::Graph> MultiDevSSAGraphBuilder::ApplyImpl(
    std::unique_ptr<ir::Graph> graph) const {
  Init();
  // Compute the topological sort order and rebuild the graph structure.
  std::vector<ir::Node *> sorted_ops = SortOpsAndDelayOptimizeOp(*graph);
  auto nodes = graph->ReleaseNodes();
  ir::Graph &result = *graph;

  for (auto &node : nodes) {
    if (node->IsVar() && node->Var()) {
      all_vars_.emplace(node->Name(), node->Var());
    }
  }
  std::unordered_set<std::string> og_has_been_broadcast;

  // We cannot invoke resize. It is a bug in GCC 4.8.
  result.Set(kGraphVars, new GraphVars(places_.size()));
  result.Set(kGraphDepVars, new GraphDepVars);
  result.Set(kGraphOps, new GraphOps);

  // Find send/recv vars so that we can place the distributed training
  // related ops on place 0.
  auto send_vars = FindDistTrainSendVars(sorted_ops);
  auto recv_vars = FindDistTrainRecvVars(sorted_ops);

  std::vector<std::unordered_set<std::string>> bcast_var_name_set;
  bcast_var_name_set.resize(places_.size());

  size_t cur_device_id = 0;
  bool is_forwarding = true;
  bool is_dist_train = false;

  std::unordered_map<std::string, int> sharded_var_device;

  for (ir::Node *node : sorted_ops) {
    if (boost::get<int>(
            node->Op()->GetAttr(OpProtoAndCheckerMaker::OpRoleAttrName())) ==
        static_cast<int>(OpRole::kRPC)) {
      int op_dev_id = CreateRPCOp(&result, node, &sharded_var_device);
      PADDLE_ENFORCE(op_dev_id != -1,
                     "Can not schedule the RPC operator to the right place.");
      if (node->Op()->Type() == "recv") {
        auto recv_vars_attr =
            boost::get<std::vector<std::string>>(node->Op()->GetNullableAttr(
                OpProtoAndCheckerMaker::OpRoleVarAttrName()));
        PADDLE_ENFORCE(recv_vars_attr.size() == 2UL);  // [parameter, gradient]
        if (recv_vars_attr[0].find(".block") == std::string::npos) {
          bcast_var_name_set[op_dev_id].emplace(recv_vars_attr[0]);
        }
      }
      is_dist_train = true;
    } else if (boost::get<int>(node->Op()->GetAttr(
                   OpProtoAndCheckerMaker::OpRoleAttrName())) ==
               static_cast<int>(OpRole::kDist)) {
      int op_dev_id = CreateDistTrainOp(&result, node, &sharded_var_device);
      if (node->Op()->Type() == "concat") {
        auto origin_param_name = node->Op()->OutputArgumentNames()[0];
        bcast_var_name_set[op_dev_id].emplace(origin_param_name);
      }
    } else if (IsScaleLossOp(node)) {
      // user can customize loss@grad if not use_default_grad_scale_
      if (strategy_.gradient_scale_ !=
          BuildStrategy::GradientScaleStrategy::kCustomized) {
        // TODO(paddle-dev): Why is there no input for this op_handle?
        auto loss_grad_name = node->Op()->OutputArgumentNames()[0];
        CreateScaleLossGradOp(&result, loss_grad_name, node->outputs[0]);
      }
      // This assumes the backward generating code will ensure IsScaleLossOp
      // is true only for the op that scales the final scalar loss.
      // It also assumes backward op will always follow the forward op in
      // the block.
      is_forwarding = false;
    } else {
      int op_dev_id = GetOpDeviceID(result, node, sharded_var_device);
      if (op_dev_id != -1) {  // This op only runs on one specific device.
        CreateComputationalOp(&result, node, op_dev_id);
        for (ir::Node *n : node->outputs) {
          sharded_var_device.emplace(n->Name(), op_dev_id);
        }
      } else {
        // This op runs on all devices, and its output may have parameters'
        // gradients.
        // TODO(paddle-dev): Why is the "read" op so special?
        if (node->Op()->Type() == "read" && strategy_.enable_data_balance_) {
          node->Op()->SetAttr("throw_eof_exp", false);
          CreateComputationalOps(&result, node, places_.size());
          const auto &data_var_names = node->Op()->Output("Out");
          InsertDataBalanceOp(&result, data_var_names);
        } else {
          CreateComputationalOps(&result, node, places_.size());
        }

        if (!is_forwarding && places_.size() > 1) {
          // Currently, we assume that once gradient is generated, it can be
          // broadcast, and each gradient is only broadcast once.
          if (static_cast<bool>(boost::get<int>(node->Op()->GetAttr(
                                    OpProtoAndCheckerMaker::OpRoleAttrName())) &
                                static_cast<int>(OpRole::kBackward))) {
            try {
              auto backward_vars = boost::get<std::vector<std::string>>(
                  node->Op()->GetNullableAttr(
                      OpProtoAndCheckerMaker::OpRoleVarAttrName()));

              PADDLE_ENFORCE_EQ(backward_vars.size() % 2, 0);

              for (size_t i = 0; i < backward_vars.size(); i += 2) {
                auto &p_name = backward_vars[i];
                auto &g_name = backward_vars[i + 1];
                VLOG(10) << "Bcast " << g_name << " for parameter " << p_name;

                switch (strategy_.reduce_) {
                  case BuildStrategy::ReduceStrategy::kReduce:
                    cur_device_id = GetAppropriateDeviceID({g_name});
                    CreateReduceOp(&result, g_name, cur_device_id);
                    sharded_var_device.emplace(g_name, cur_device_id);
                    if (!is_dist_train) {
                      bcast_var_name_set[cur_device_id].emplace(p_name);
                    }
                    break;
                  case BuildStrategy::ReduceStrategy::kAllReduce:
                    if (IsSparseGradient(g_name)) {
                      CreateReduceOp(&result, g_name, 0);
                      CreateBroadcastOp(&result, g_name, 0);
                    } else {
                      InsertAllReduceOp(&result, g_name);
                    }
                    break;
                  default:
                    LOG(FATAL) << "Unknown reduce strategy ";
                    break;
                }
              }
            } catch (const boost::bad_get &e) {
            }
          }
        }
      }
    }
  }
  bool use_gpu = false;
#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32)
  use_gpu = nccl_ctxs_ != nullptr;
#endif

  // Principles for inserting broadcast operators:
  // 1. Broadcast optimized parameters in the Reduce strategy;
  // 2. No need to broadcast optimized parameters in the AllReduce strategy,
  //    because the optimization sub-graph runs on every GPU;
  // 3. Always broadcast received parameters in distributed training.
  if ((use_gpu &&
       strategy_.reduce_ == BuildStrategy::ReduceStrategy::kReduce) ||
      is_dist_train) {
    if (strategy_.fuse_broadcast_op_) {
      CreateFusedBroadcastOp(&result, bcast_var_name_set);
    } else {
      for (size_t dev_id = 0; dev_id < bcast_var_name_set.size(); ++dev_id) {
        auto &to_bcast_set = bcast_var_name_set[dev_id];
        for (auto &bcast_name : to_bcast_set) {
          CreateBroadcastOp(&result, bcast_name, dev_id);
        }
      }
    }
  }
  /*
  Dependency graph has been constructed. However, there are still data
  hazards that need to be handled.
 */
  PolishGraphToSupportDataHazards(&result);

  /*
   * Only variables should be the leaves of graph.
   */
  AddOutputToLeafOps(&result);
  result.Erase<GraphOps>(kGraphOps);
  return graph;
}

bool MultiDevSSAGraphBuilder::IsSparseGradient(const std::string &og) const {
  PADDLE_ENFORCE(all_vars_.count(og) != 0);
  if (all_vars_.at(og)->GetType() == proto::VarType::SELECTED_ROWS) {
    return true;
  }
  return false;
}

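// Falls back to the default device context of `p` when NCCL is unavailable
// (non-CUDA builds) or when nccl_ctxs_ is null.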
void MultiDevSSAGraphBuilder::SetCommunicationContext(
    OpHandleBase *op_handle, const platform::Place &p) const {
#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32)
  if (nccl_ctxs_ == nullptr) {
    op_handle->SetDeviceContext(p,
                                platform::DeviceContextPool::Instance().Get(p));
  }
#else
  op_handle->SetDeviceContext(p,
                              platform::DeviceContextPool::Instance().Get(p));
#endif
}

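// Broadcasts the latest version of `p_name` from device `src_dev_id` to all
// places, appending a new version of the variable on every device.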
void MultiDevSSAGraphBuilder::CreateBroadcastOp(ir::Graph *result,
                                                const std::string &p_name,
                                                size_t src_dev_id) const {
#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32)
  auto *op_handle = new BroadcastOpHandle(
      result->CreateEmptyNode("broadcast", ir::Node::Type::kOperation),
      local_scopes_, places_, nccl_ctxs_);
#else
  auto *op_handle = new BroadcastOpHandle(
      result->CreateEmptyNode("broadcast", ir::Node::Type::kOperation),
      local_scopes_, places_);
#endif
  result->Get<GraphOps>(kGraphOps).emplace_back(op_handle);

  auto *in =
      result->Get<GraphVars>(kGraphVars).at(src_dev_id).at(p_name).back();
  op_handle->AddInput(in);

  for (size_t i = 0; i < places_.size(); ++i) {
    auto &p = places_[i];
    SetCommunicationContext(op_handle, p);
    auto &vars = result->Get<GraphVars>(kGraphVars).at(i).at(p_name);
    auto *out_var = new VarHandle(
        result->CreateEmptyNode(p_name, ir::Node::Type::kVariable), vars.size(),
        i, p_name, p);
    vars.emplace_back(out_var);
    op_handle->AddOutput(out_var);
  }
}

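// Same as CreateBroadcastOp, except that a single FusedBroadcastOpHandle
// handles every variable in `bcast_varnames`, each broadcast from its own
// source device, instead of one op handle per variable.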
void MultiDevSSAGraphBuilder::CreateFusedBroadcastOp(
    ir::Graph *result,
    const std::vector<std::unordered_set<std::string>> &bcast_varnames) const {
#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32)
  auto *op_handle = new FusedBroadcastOpHandle(
      result->CreateEmptyNode("fused_broadcast", ir::Node::Type::kOperation),
      local_scopes_, places_, nccl_ctxs_);
#else
  auto *op_handle = new FusedBroadcastOpHandle(
      result->CreateEmptyNode("fused_broadcast", ir::Node::Type::kOperation),
      local_scopes_, places_);
#endif
  result->Get<GraphOps>(kGraphOps).emplace_back(op_handle);

  for (size_t i = 0; i < places_.size(); ++i) {
    auto &p = places_[i];
    SetCommunicationContext(op_handle, p);
  }

  for (size_t dev_id = 0; dev_id < bcast_varnames.size(); ++dev_id) {
    for (auto &p_name : bcast_varnames[dev_id]) {
      auto *in =
          result->Get<GraphVars>(kGraphVars).at(dev_id).at(p_name).back();
      op_handle->AddInput(in);
      for (size_t out_dev_id = 0; out_dev_id < places_.size(); ++out_dev_id) {
        auto &p = places_[out_dev_id];
        auto &vars =
            result->Get<GraphVars>(kGraphVars).at(out_dev_id).at(p_name);
        auto *out_var = new VarHandle(
            result->CreateEmptyNode(p_name, ir::Node::Type::kVariable),
            vars.size(), out_dev_id, p_name, p);
        vars.emplace_back(out_var);
        op_handle->AddOutput(out_var);
      }
    }
  }
}

void MultiDevSSAGraphBuilder::CreateComputationalOp(ir::Graph *result,
                                                    ir::Node *node,
                                                    int dev_id) const {
  result->Get<GraphOps>(kGraphOps).emplace_back(
      new ComputationOpHandle(result->CreateOpNode(node->Op()),
                              local_scopes_[dev_id], places_[dev_id]));
  CreateOpHandleIOs(result, node, dev_id);
}

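// Inserts an AllReduceOpHandle that consumes the latest version of `og` on
// every device and produces a new, reduced version on each of them.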
void MultiDevSSAGraphBuilder::InsertAllReduceOp(ir::Graph *result,
                                                const std::string &og) const {
#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32)
  result->Get<GraphOps>(kGraphOps).emplace_back(new AllReduceOpHandle(
      result->CreateEmptyNode("allreduce", ir::Node::Type::kOperation),
      local_scopes_, places_, nccl_ctxs_));
#else
  result->Get<GraphOps>(kGraphOps).emplace_back(new AllReduceOpHandle(
      result->CreateEmptyNode("allreduce", ir::Node::Type::kOperation),
      local_scopes_, places_));
#endif
  auto *op_handle = result->Get<GraphOps>(kGraphOps).back();

  for (size_t i = 0; i < places_.size(); ++i) {
    auto &p = places_[i];
    SetCommunicationContext(op_handle, p);
    auto &vars = result->Get<GraphVars>(kGraphVars)[i][og];
    PADDLE_ENFORCE(!vars.empty());
    auto &prev_grad = vars.back();
    op_handle->AddInput(prev_grad);

    auto var =
        new VarHandle(result->CreateEmptyNode(og, ir::Node::Type::kVariable),
                      vars.size(), i, og, p);
    vars.emplace_back(var);
    op_handle->AddOutput(var);
  }
}

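// Inserts a DataBalanceOpHandle that takes the latest version of every data
// variable on each place and produces a new, balanced version of it.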
void MultiDevSSAGraphBuilder::InsertDataBalanceOp(
    ir::Graph *result, const std::vector<std::string> &datas) const {
#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32)
  result->Get<GraphOps>(kGraphOps).emplace_back(new DataBalanceOpHandle(
      result->CreateEmptyNode("data_balance", ir::Node::Type::kOperation),
      local_scopes_, places_, nccl_ctxs_));
#else
  result->Get<GraphOps>(kGraphOps).emplace_back(new DataBalanceOpHandle(
      result->CreateEmptyNode("data_balance", ir::Node::Type::kOperation),
      local_scopes_, places_));
#endif
  auto *op_handle = result->Get<GraphOps>(kGraphOps).back();
  for (size_t i = 0; i < places_.size(); ++i) {
    auto &p = places_[i];
    SetCommunicationContext(op_handle, p);
    for (const std::string &d_name : datas) {
      auto &vars = result->Get<GraphVars>(kGraphVars)[i][d_name];
      PADDLE_ENFORCE(!vars.empty());
      op_handle->AddInput(vars.back());
      auto var = new VarHandle(
          result->CreateEmptyNode(d_name, ir::Node::Type::kVariable),
          vars.size(), i, d_name, p);
      vars.emplace_back(var);
      op_handle->AddOutput(var);
    }
  }
}

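// Decides which single device an op must run on: under the Reduce strategy,
// optimizer ops are pinned to the device that owns their gradient. Returns
// -1 when the op should be replicated on all devices.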
int MultiDevSSAGraphBuilder::GetOpDeviceID(
    const ir::Graph &graph, ir::Node *node,
    const std::unordered_map<std::string, int> &sharded_var_device) const {
  if (strategy_.reduce_ != BuildStrategy::ReduceStrategy::kReduce) {
    return -1;
  }
  int op_role = boost::get<int>(
      node->Op()->GetAttr(framework::OpProtoAndCheckerMaker::OpRoleAttrName()));
  if (op_role != static_cast<int>(framework::OpRole::kOptimize)) {
    return -1;
  }
  auto param_grad = boost::get<std::vector<std::string>>(
      node->Op()->GetAttr(OpProtoAndCheckerMaker::OpRoleVarAttrName()));

  PADDLE_ENFORCE_EQ(param_grad.size(), 2U);
  int dev_id = GetVarDeviceID(graph, param_grad[1], sharded_var_device);
  PADDLE_ENFORCE_NE(dev_id, -1, "dev_id should not be -1.[%s, %s, %s]",
                    node->Op()->Type(), param_grad[0], param_grad[1]);
  return dev_id;
}

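// Looks up the device that a sharded variable was assigned to; if the name
// itself is not recorded, retries with the kNewGradSuffix stripped. Returns
// -1 for variables that are not sharded.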
int MultiDevSSAGraphBuilder::GetVarDeviceID(
    const ir::Graph &graph, const std::string &varname,
    const std::unordered_map<std::string, int> &sharded_var_device) const {
  auto got = sharded_var_device.find(varname);
  if (got == sharded_var_device.end()) {
    auto pos = varname.find(framework::kNewGradSuffix);
    if (pos != std::string::npos) {
      got = sharded_var_device.find(varname.substr(0, pos));
    }
  }
  return got == sharded_var_device.end() ? -1 : got->second;
}

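// Adds a ScaleLossGradOpHandle on every place to produce the initial
// loss@grad value; it only uses the device count as the scale factor (see
// the FIXME below).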
void MultiDevSSAGraphBuilder::CreateScaleLossGradOp(
    ir::Graph *result, const std::string &loss_grad_name,
    ir::Node *out_var_node) const {
  for (size_t i = 0; i < places_.size(); ++i) {
    // Insert ScaleCost OpHandle
    auto *dev_ctx = platform::DeviceContextPool::Instance().Get(places_[i]);
    auto *op_handle = new ScaleLossGradOpHandle(
        result->CreateEmptyNode("scale_loss_grad", ir::Node::Type::kOperation),
        local_scopes_.size(), local_scopes_[i], places_[i], dev_ctx);
    result->Get<GraphOps>(kGraphOps).emplace_back(op_handle);

    // FIXME: Currently ScaleLossGradOp only uses device_count as the scale
    // factor, so it does not depend on any other operators.
    // VarHandle *loss = GetVarHandle(loss_var_name, place);
    // loss->pending_ops_.emplace_back(op_handle);
    // op_handle->inputs_.emplace_back(loss);

    CreateOpOutput(result, op_handle,
                   result->CreateVarNode(out_var_node->Var()), places_[i], i);
  }
}

void MultiDevSSAGraphBuilder::CreateComputationalOps(ir::Graph *result,
                                                     ir::Node *node,
                                                     size_t num_places) const {
  for (size_t scope_idx = 0; scope_idx < num_places; ++scope_idx) {
    auto p = places_[scope_idx];
    auto s = local_scopes_[scope_idx];
    result->Get<GraphOps>(kGraphOps).emplace_back(
        new ComputationOpHandle(result->CreateOpNode(node->Op()), s, p));
    CreateOpHandleIOs(result, node, scope_idx);
  }
}

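// Gathers the latest version of `og` from every device, reduces it onto
// `dst_dev_id`, and returns the newly created VarHandle on that device.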
VarHandle *MultiDevSSAGraphBuilder::CreateReduceOp(ir::Graph *result,
                                                   const std::string &og,
                                                   int dst_dev_id) const {
#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32)
  result->Get<GraphOps>(kGraphOps).emplace_back(new ReduceOpHandle(
      result->CreateEmptyNode("reduce", ir::Node::Type::kOperation),
      local_scopes_, places_, nccl_ctxs_));
#else
  result->Get<GraphOps>(kGraphOps).emplace_back(new ReduceOpHandle(
      result->CreateEmptyNode("reduce", ir::Node::Type::kOperation),
      local_scopes_, places_));
#endif
  auto *op_handle = result->Get<GraphOps>(kGraphOps).back();

  for (size_t i = 0; i < places_.size(); ++i) {
    auto &p = places_[i];
    SetCommunicationContext(op_handle, p);
    auto &vars = result->Get<GraphVars>(kGraphVars)[i][og];
    PADDLE_ENFORCE(!vars.empty());
    auto &prev_grad = vars.back();
    op_handle->AddInput(prev_grad);
  }
  auto &vars = result->Get<GraphVars>(kGraphVars)[dst_dev_id][og];
  auto var =
      new VarHandle(result->CreateEmptyNode(og, ir::Node::Type::kVariable),
                    vars.size(), dst_dev_id, og, places_[dst_dev_id]);
  vars.emplace_back(var);
  op_handle->AddOutput(var);
  return var;
}

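// Places a distributed-training helper op (split_byref, split_selected_rows,
// split_ids or concat) on a single device chosen from its first input's
// sharding (or by load balancing under AllReduce), and records the device of
// its output variables in `sharded_var_device`.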
int MultiDevSSAGraphBuilder::CreateDistTrainOp(
    ir::Graph *result, ir::Node *node,
    std::unordered_map<std::string, int> *sharded_var_device) const {
  int op_dev_id = -1;
  std::vector<std::string> input_var_names;
  std::vector<std::string> output_var_names;
  for (ir::Node *input : node->inputs) {
    input_var_names.push_back(input->Name());
  }
  for (ir::Node *output : node->outputs) {
    output_var_names.push_back(output->Name());
  }

  if (node->Op()->Type() == "split_byref" ||
      node->Op()->Type() == "split_selected_rows" ||
      node->Op()->Type() == "split_ids") {
    // TODO(paddle-dev): getting the first var is not safe.
    op_dev_id =
        GetVarDeviceID(*result, input_var_names[0], *sharded_var_device);
    if (strategy_.reduce_ == BuildStrategy::ReduceStrategy::kAllReduce) {
      op_dev_id = GetAppropriateDeviceID(input_var_names);
      for (auto &varname : input_var_names) {
        sharded_var_device->emplace(varname, op_dev_id);
      }
    }
    for (auto &varname : output_var_names) {
      sharded_var_device->emplace(varname, op_dev_id);
    }
  } else if (node->Op()->Type() == "concat") {
    op_dev_id =
        GetVarDeviceID(*result, input_var_names[0], *sharded_var_device);
    for (auto &varname : output_var_names) {
      sharded_var_device->emplace(varname, op_dev_id);
    }
  } else {
    LOG(ERROR) << "got unexpected dist op: " << node->Op()->Type();
    PADDLE_THROW(
        "the distributed training related op should be in [split_byref, "
        "concat].");
  }

  PADDLE_ENFORCE(op_dev_id != -1,
                 "cannot find the right place for distributed op: %s",
                 node->Op()->Type());

  CreateComputationalOp(result, node, op_dev_id);
  return op_dev_id;
}

void SetOpInputsAllPlaces(ir::Graph *result, ir::Node *node, int num_places) {
  auto *op_handle = result->Get<GraphOps>(kGraphOps).back();
  for (ir::Node *input : node->inputs) {
    VarHandle *var = nullptr;
    for (int place_offset = 0; place_offset < num_places; ++place_offset) {
      auto &var_holders = result->Get<GraphVars>(kGraphVars)[place_offset];
      auto &var_holder = var_holders[input->Name()];
      if (!var_holder.empty()) {
        var = *var_holder.rbegin();
        op_handle->AddInput(var);
      }
    }
  }
}

// Create RPC related op handles that connect its in ops and out ops.
int MultiDevSSAGraphBuilder::CreateRPCOp(
    ir::Graph *result, ir::Node *node,
    std::unordered_map<std::string, int> *sharded_var_device) const {
  int op_dev_id = -1;
  if (node->Op()->Type() == "send") {
    // TODO(paddle-dev): getting the first var is not safe.
    op_dev_id =
        GetVarDeviceID(*result, node->inputs[0]->Name(), *sharded_var_device);
    PADDLE_ENFORCE(!ir::IsControlDepVar(*node->inputs[0]),
                   "This hack no longer holds, please fix.");
    // A variable name that contains ".block" means it was split by the
    // split_byref op.
    if (strategy_.reduce_ == BuildStrategy::ReduceStrategy::kAllReduce &&
        node->inputs[0]->Name().find(".block") == std::string::npos) {
      std::vector<std::string> input_var_names;
      for (ir::Node *n : node->inputs) {
        input_var_names.push_back(n->Name());
      }
      auto send_param_grad = boost::get<std::vector<std::string>>(
          node->Op()->GetAttr(OpProtoAndCheckerMaker::OpRoleVarAttrName()));
      PADDLE_ENFORCE_EQ(send_param_grad.size(), 2U);
      op_dev_id = GetAppropriateDeviceID({send_param_grad[1]});
      VLOG(10) << "send grad " << input_var_names[0] << " origin "
               << send_param_grad[1] << " place: " << op_dev_id;
      for (auto &varname : input_var_names) {
        sharded_var_device->emplace(varname, op_dev_id);
      }
      sharded_var_device->emplace(send_param_grad[1], op_dev_id);
    }
  } else if (node->Op()->Type() == "recv") {
    std::vector<std::string> output_var_names;
    for (ir::Node *n : node->outputs) {
      output_var_names.push_back(n->Name());
    }
    auto recv_param_grad = boost::get<std::vector<std::string>>(
        node->Op()->GetAttr(OpProtoAndCheckerMaker::OpRoleVarAttrName()));
    if (recv_param_grad.size() == 2U) {
      op_dev_id =
          GetVarDeviceID(*result, recv_param_grad[1], *sharded_var_device);
      VLOG(10) << "recv param " << recv_param_grad[0]
               << " get grad place: " << recv_param_grad[1]
               << " place: " << op_dev_id;
    } else {
      op_dev_id = GetAppropriateDeviceID(output_var_names);
    }
    for (auto &varname : output_var_names) {
      sharded_var_device->emplace(varname, op_dev_id);
    }
  } else {
    // send_barrier, fetch_barrier will run on place 0;
    op_dev_id = 0;
  }

  PADDLE_ENFORCE(op_dev_id != -1, "can not find the right place for rpc op: %s",
                 node->Op()->Type());
  result->Get<GraphOps>(kGraphOps).emplace_back(new RPCOpHandle(
      result->CreateOpNode(node->Op()), *node->Op(), local_scopes_[op_dev_id],
      node->Op()->Type(), places_[op_dev_id]));

  if (node->Op()->Type() == "send") {
    CreateOpHandleIOs(result, node, op_dev_id);
  } else {
    // The inputs of send_barrier, recv and fetch_barrier are dependency
    // vars; get them from all places.
    auto p = places_[op_dev_id];
    auto *op_handle = result->Get<GraphOps>(kGraphOps).back();
    op_handle->SetDeviceContext(p,
                                platform::DeviceContextPool::Instance().Get(p));

    SetOpInputsAllPlaces(result, node, places_.size());
    for (ir::Node *output : node->outputs) {
      int outvar_dev_id = op_dev_id;
      if (node->Op()->Type() == "fetch_barrier") {
        outvar_dev_id =
            GetVarDeviceID(*result, output->Name(), *sharded_var_device);
        PADDLE_ENFORCE_NE(outvar_dev_id, -1);
      }
      p = places_[outvar_dev_id];
      ir::Node *new_node = nullptr;
      if (output->Var()) {
        new_node = result->CreateVarNode(output->Var());
      } else {
        new_node =
            result->CreateEmptyNode(output->Name(), ir::Node::Type::kVariable);
      }
      CreateOpOutput(result, op_handle, new_node, p, outvar_dev_id);
    }
  }
  return op_dev_id;
}

bool MultiDevSSAGraphBuilder::IsScaleLossOp(ir::Node *node) const {
  return boost::get<int>(
             node->Op()->GetAttr(OpProtoAndCheckerMaker::OpRoleAttrName())) ==
             (static_cast<int>(OpRole::kBackward) |
              static_cast<int>(OpRole::kLoss)) &&
         !loss_var_name_.empty();  // If loss_var is empty, this is test mode.
}
}  // namespace details
}  // namespace framework
}  // namespace paddle

REGISTER_PASS(multi_devices_pass,
              paddle::framework::details::MultiDevSSAGraphBuilder)
    .RequirePassAttr(paddle::framework::details::kLossVarName)
    .RequirePassAttr(paddle::framework::details::kPlaces)
    .RequirePassAttr(paddle::framework::details::kParams)
    .RequirePassAttr(paddle::framework::details::kLocalScopes)
    .RequirePassAttr(paddle::framework::details::kStrategy);