//   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <algorithm>
#include <fstream>
#include <memory>
#include <string>
#include <unordered_map>
#include <unordered_set>
#include <utility>
#include <vector>

#include "paddle/fluid/framework/details/all_reduce_op_handle.h"
#include "paddle/fluid/framework/details/broadcast_op_handle.h"
#include "paddle/fluid/framework/details/computation_op_handle.h"
#include "paddle/fluid/framework/details/data_balance_op_handle.h"
#include "paddle/fluid/framework/details/fused_broadcast_op_handle.h"
#include "paddle/fluid/framework/details/multi_devices_graph_pass.h"
#include "paddle/fluid/framework/details/reduce_op_handle.h"
#include "paddle/fluid/framework/details/rpc_op_handle.h"
#include "paddle/fluid/framework/details/scale_loss_grad_op_handle.h"
#include "paddle/fluid/framework/ir/graph_helper.h"
#include "paddle/fluid/framework/ir/node.h"
#include "paddle/fluid/framework/op_info.h"
#include "paddle/fluid/framework/scope.h"

namespace paddle {
namespace framework {
namespace details {

namespace {
// TODO(panyx0718): Clean this up as well.
// all operators. NOTE that even though we use a vector here, the operators
// are unordered.
typedef std::vector<OpHandleBase *> GraphOps;
const char kGraphOps[] = "ops";

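// Insert a control dependency variable from every reader of an old variable
// version to the writer of the next version, so that write-after-read
// hazards are serialized. A dependency is only added when the two ops do not
// already share a direct data edge.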
void PolishGraphToSupportDataHazards(ir::Graph *graph) {
  for (auto &var_map : graph->Get<GraphVars>(kGraphVars)) {
    for (auto &name_pair : var_map) {
      if (name_pair.second.size() <= 1) {
        continue;
      }
      auto it_new = name_pair.second.rbegin();
      auto it_old = name_pair.second.rbegin();
      ++it_old;
      for (; it_old != name_pair.second.rend(); it_new = it_old, ++it_old) {
        OpHandleBase *write_op = (*it_new)->GeneratedOp();
        const auto &read_ops = (*it_old)->PendingOps();

        for (auto *read_op : read_ops) {
          // Manually add a dependency var from read_op to write_op;
          if (read_op == write_op) {
            // Read Write is the same op.
            continue;
          }
          bool has_dep = false;
          for (auto *r_out : read_op->Outputs()) {
            for (auto *w_in : write_op->Inputs()) {
              if (r_out->Node() == w_in->Node()) {
                has_dep = true;
                break;
              }
            }
          }
          if (has_dep) continue;

          auto *dep_var = new DummyVarHandle(graph->CreateControlDepVar());
          read_op->AddOutput(dep_var);
          write_op->AddInput(dep_var);
          graph->Get<GraphDepVars>(kGraphDepVars).emplace(dep_var);
        }
      }
    }
  }
}

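// Return the latest version of the variable handle named by `node` on the
// given place, creating version 0 first if no handle exists there yet.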
VarHandle *CreateOrGetLatestVarHandle(ir::Graph *graph, ir::Node *node,
                                      const platform::Place &place,
                                      size_t place_offset) {
  auto &var_holders = graph->Get<GraphVars>(kGraphVars)[place_offset];
  auto &var_holder = var_holders[node->Name()];
  VarHandle *var = nullptr;
  if (var_holder.empty()) {
    if (node->Var()) {
      var = new VarHandle(graph->CreateVarNode(node->Var()), 0, place_offset,
                          node->Name(), place);
    } else {
      var = new VarHandle(
          graph->CreateEmptyNode(node->Name(), ir::Node::Type::kVariable), 0,
          place_offset, node->Name(), place);
    }
    var_holder.emplace_back(var);
  } else {
    var = *var_holder.rbegin();
  }
  return var;
}

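// Create the next version of `new_node`'s variable on `place` and register
// it as an output of `op_handle`.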
void CreateOpOutput(ir::Graph *graph, OpHandleBase *op_handle,
                    ir::Node *new_node, const platform::Place &place,
                    size_t place_offset) {
  auto &vars =
      graph->Get<GraphVars>(kGraphVars)[place_offset][new_node->Name()];
  size_t version = vars.size();
  auto var =
      new VarHandle(new_node, version, place_offset, new_node->Name(), place);
  vars.emplace_back(var);
  op_handle->AddOutput(var);
}

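// Attach a dummy output variable to every op that has no outputs, so that
// only variables, never ops, are leaves of the dependency graph.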
void AddOutputToLeafOps(ir::Graph *graph) {
  for (auto &op : graph->Get<GraphOps>(kGraphOps)) {
    if (!op->Outputs().empty()) {
      continue;
    }
    auto *dummy_leaf = new DummyVarHandle(graph->CreateControlDepVar());
    graph->Get<GraphDepVars>(kGraphDepVars).emplace(dummy_leaf);
    op->AddOutput(dummy_leaf);
  }
}
}  // namespace

static const char kLossVarName[] = "loss_var_name";
static const char kPlaces[] = "places";
static const char kLocalScopes[] = "local_scopes";
static const char kStrategy[] = "strategy";
static const char kNumParallelDevices[] = "num_parallel_devices";

void MultiDevSSAGraphBuilder::Init() const {
  all_vars_.clear();
  balance_vars_.clear();

  loss_var_name_ = Get<const std::string>(kLossVarName);
  places_ = Get<const std::vector<platform::Place>>(kPlaces);
  local_scopes_ = Get<const std::vector<Scope *>>(kLocalScopes);
  strategy_ = Get<const BuildStrategy>(kStrategy);
#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32)
  nccl_ctxs_ = &Get<platform::NCCLContextMap>("nccl_ctxs");
#endif

  balance_vars_.resize(places_.size(), 0);
  if (strategy_.enable_data_balance_ && places_.size() == 1) {
    LOG(WARNING) << "There is no need to enable data balance when there is "
                    "only one place. enable_data_balance is set to False.";
    strategy_.enable_data_balance_ = false;
  }
}

void MultiDevSSAGraphBuilder::CreateOpHandleIOs(ir::Graph *result,
                                                ir::Node *node,
                                                size_t place_id) const {
  auto p = places_[place_id];
  auto *op_handle = result->Get<GraphOps>(kGraphOps).back();
  op_handle->SetDeviceContext(p,
                              platform::DeviceContextPool::Instance().Get(p));

  for (ir::Node *input : node->inputs) {
    VarHandle *var = CreateOrGetLatestVarHandle(result, input, p, place_id);
    op_handle->AddInput(var);
  }

  for (ir::Node *output : node->outputs) {
    ir::Node *new_node = nullptr;
    if (output->Var()) {
      new_node = result->CreateVarNode(output->Var());
    } else {
      new_node =
          result->CreateEmptyNode(output->Name(), ir::Node::Type::kVariable);
    }
    CreateOpOutput(result, op_handle, new_node, p, place_id);
  }
}

std::vector<std::string> MultiDevSSAGraphBuilder::FindDistTrainSendVars(
    const std::vector<ir::Node *> &nodes) const {
  std::vector<std::string> send_vars;
  // since parameters are all in block 0,
  // it's enough to only scan send ops in block 0
  for (auto &node : nodes) {
    OpDesc *op = node->Op();
    // TODO(Yancey1989): use a graceful method to find send op,
    // instead of the hard-coded string
    if (op->Type() == "send") {
192 193 194 195 196 197 198 199 200 201
      auto op_vars = op->InputArgumentNames();
      send_vars.reserve(send_vars.size() +
                        std::distance(op_vars.begin(), op_vars.end()));
      send_vars.insert(send_vars.end(), op_vars.begin(), op_vars.end());
    }
  }
  return send_vars;
}

std::vector<std::string> MultiDevSSAGraphBuilder::FindDistTrainRecvVars(
    const std::vector<ir::Node *> &nodes) const {
  std::vector<std::string> recv_vars;
  for (auto &node : nodes) {
    OpDesc *op = node->Op();
    // TODO(Yancey1989): use a graceful method to find recv op,
    // instead of the hard-coded string
    if (op->Type() == "recv") {
      auto op_vars = op->OutputArgumentNames();
      recv_vars.reserve(recv_vars.size() +
                        std::distance(op_vars.begin(), op_vars.end()));
      recv_vars.insert(recv_vars.end(), op_vars.begin(), op_vars.end());
    }
  }
  return recv_vars;
}

size_t MultiDevSSAGraphBuilder::GetAppropriateDeviceID(
    const std::vector<std::string> &var_names) const {
  int64_t numel_sum = 0;
  for (auto var_name : var_names) {
    if (all_vars_.find(var_name) == all_vars_.end()) continue;
    auto var_desc = all_vars_.at(var_name);
    PADDLE_ENFORCE_NOT_NULL(var_desc);
    auto dim = framework::make_ddim(var_desc->GetShape());
    int64_t numel = framework::product(dim);
    PADDLE_ENFORCE_GT(numel, 0);
    numel_sum += numel;
  }

  auto smallest =
      std::min_element(std::begin(balance_vars_), std::end(balance_vars_));
  size_t dev_id =
      static_cast<size_t>(std::distance(std::begin(balance_vars_), smallest));
  balance_vars_[dev_id] += numel_sum;
  return dev_id;
}

// Topologically sort the graph nodes from inputs to outputs.
// Since SSAGraphBuilder depends on forward/backward nodes to assign devices
// to parameters/gradients before optimizer ops, a plain topological sort is
// insufficient (some optimizer ops might not depend on any node), so we
// manually move all optimizer nodes after the last backward node.
// However, the assumption by SSAGraphBuilder should be relaxed in the future.
std::vector<ir::Node *> SortOpsAndDelayOptimizeOp(const ir::Graph &graph) {
  std::vector<ir::Node *> ret = ir::TopologySortOperations(graph);
  size_t last_backward = 0;
  for (size_t i = 0; i < ret.size(); ++i) {
    if (boost::get<int>(
            ret[i]->Op()->GetAttr(OpProtoAndCheckerMaker::OpRoleAttrName())) ==
        static_cast<int>(OpRole::kBackward)) {
      last_backward = i;
    }
  }

  std::vector<ir::Node *> optimize_ops;
  std::vector<ir::Node *> sorted_ret;
  for (size_t i = 0; i < ret.size(); ++i) {
    if (i < last_backward) {
      if (static_cast<bool>(boost::get<int>(ret[i]->Op()->GetAttr(
                                OpProtoAndCheckerMaker::OpRoleAttrName())) &
                            static_cast<int>(OpRole::kOptimize))) {
        optimize_ops.push_back(ret[i]);
      } else {
        sorted_ret.push_back(ret[i]);
      }
    } else if (i == last_backward) {
      sorted_ret.push_back(ret[i]);
      // Verify that no operations before optimize ops depends on optimize ops.
      std::unordered_set<ir::Node *> optimize_set(optimize_ops.begin(),
                                                  optimize_ops.end());
      for (ir::Node *n : sorted_ret) {
        for (ir::Node *in : n->inputs) {
          for (ir::Node *pre_n : in->inputs) {
            PADDLE_ENFORCE(optimize_set.find(pre_n) == optimize_set.end(),
                           "optimize operations cannot be depended on by "
                           "forward or backward node %s -> %s",
                           pre_n->Name(), n->Name());
          }
        }
      }
      sorted_ret.insert(sorted_ret.end(), optimize_ops.begin(),
                        optimize_ops.end());
    } else {
      sorted_ret.push_back(ret[i]);
    }
  }
  return sorted_ret;
}

std::unique_ptr<ir::Graph> MultiDevSSAGraphBuilder::ApplyImpl(
    std::unique_ptr<ir::Graph> graph) const {
  Init();
  // Compute the topological order and rebuild the graph structure.
  std::vector<ir::Node *> sorted_ops = SortOpsAndDelayOptimizeOp(*graph);
  auto nodes = graph->ReleaseNodes();
  ir::Graph &result = *graph;

  size_t num_parallel_devices = Get<size_t>(kNumParallelDevices);

  for (auto &node : nodes) {
    if (node->IsVar() && node->Var()) {
      all_vars_.emplace(node->Name(), node->Var());
    }
  }
  std::unordered_set<std::string> og_has_been_broadcast;

  // We cannot invoke resize; it triggers a bug in GCC 4.8.
  result.Set(kGraphVars, new GraphVars(places_.size()));
  result.Set(kGraphDepVars, new GraphDepVars);
  result.Set(kGraphOps, new GraphOps);

  // find send/recv vars so that we can place the distributed training
  // related op on place 0
  auto send_vars = FindDistTrainSendVars(sorted_ops);
  auto recv_vars = FindDistTrainRecvVars(sorted_ops);

  std::vector<std::unordered_set<std::string>> bcast_var_name_set;
  bcast_var_name_set.resize(places_.size());

  size_t cur_device_id = 0;
  bool is_forwarding = true;
  bool is_dist_train = false;

  std::unordered_map<std::string, int> sharded_var_device;

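  // Dispatch each op in topological order: RPC ops and distributed-training
  // ops are pinned to a single device, the scale-loss op marks the end of
  // the forward pass, and every other op is either pinned to one device
  // (Reduce strategy) or replicated on all devices.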
  for (ir::Node *node : sorted_ops) {
    if (boost::get<int>(
            node->Op()->GetAttr(OpProtoAndCheckerMaker::OpRoleAttrName())) ==
        static_cast<int>(OpRole::kRPC)) {
      int op_dev_id = CreateRPCOp(&result, node, &sharded_var_device);
      PADDLE_ENFORCE(op_dev_id != -1,
                     "Can not schedule the RPC operator to the right place.");
      if (node->Op()->Type() == "recv") {
        auto recv_vars_attr =
            boost::get<std::vector<std::string>>(node->Op()->GetNullableAttr(
                OpProtoAndCheckerMaker::OpRoleVarAttrName()));
        PADDLE_ENFORCE(recv_vars_attr.size() == 2UL);  // [parameter, gradient]
        if (recv_vars_attr[0].find(".block") == std::string::npos) {
          bcast_var_name_set[op_dev_id].emplace(recv_vars_attr[0]);
        }
      }
      is_dist_train = true;
    } else if (boost::get<int>(node->Op()->GetAttr(
                   OpProtoAndCheckerMaker::OpRoleAttrName())) ==
               static_cast<int>(OpRole::kDist)) {
      int op_dev_id = CreateDistTrainOp(&result, node, &sharded_var_device);
      if (node->Op()->Type() == "concat") {
        auto origin_param_name = node->Op()->OutputArgumentNames()[0];
        bcast_var_name_set[op_dev_id].emplace(origin_param_name);
      }
    } else if (IsScaleLossOp(node)) {
      // users can customize loss@grad when gradient_scale_ is kCustomized
      if (strategy_.gradient_scale_ !=
          BuildStrategy::GradientScaleStrategy::kCustomized) {
        // TODO(paddle-dev): Why is there no input for this op_handle?
        auto loss_grad_name = node->Op()->OutputArgumentNames()[0];
        auto out_dtype = all_vars_.at(loss_grad_name)->GetDataType();
        CreateScaleLossGradOp(&result, loss_grad_name, node->outputs[0],
                              out_dtype);
      }
      // This assumes the backward generating code will ensure IsScaleLossOp
      // is true only for the op that scales the final scalar loss.
      // It also assumes backward op will always follow the forward op in
      // the block.
      is_forwarding = false;
    } else {
      int op_dev_id = GetOpDeviceID(result, node, sharded_var_device);
      if (op_dev_id != -1) {  // This op only runs on one specific device.
        CreateComputationalOp(&result, node, op_dev_id);
        for (ir::Node *n : node->outputs) {
          sharded_var_device.emplace(n->Name(), op_dev_id);
        }
      } else {
        // This op runs on all devices, and its output may have parameter's
        // gradients.
        // TODO(paddle-dev): Why is the "read" op so special?
        if (node->Op()->Type() == "read" && strategy_.enable_data_balance_) {
          node->Op()->SetAttr("throw_eof_exp", false);
          CreateComputationalOps(&result, node, places_.size());
          const auto &data_var_names = node->Op()->Output("Out");
          InsertDataBalanceOp(&result, data_var_names);
        } else {
          CreateComputationalOps(&result, node, places_.size());
        }

        if (!is_forwarding && num_parallel_devices > 1) {
          // Currently, we assume that once gradient is generated, it can be
          // broadcast, and each gradient is only broadcast once.
          if (static_cast<bool>(boost::get<int>(node->Op()->GetAttr(
                                    OpProtoAndCheckerMaker::OpRoleAttrName())) &
                                static_cast<int>(OpRole::kBackward))) {
            try {
              auto backward_vars = boost::get<std::vector<std::string>>(
                  node->Op()->GetNullableAttr(
                      OpProtoAndCheckerMaker::OpRoleVarAttrName()));

              PADDLE_ENFORCE_EQ(backward_vars.size() % 2, 0);

              for (size_t i = 0; i < backward_vars.size(); i += 2) {
                auto &p_name = backward_vars[i];
                auto &g_name = backward_vars[i + 1];
                VLOG(10) << "Bcast " << g_name << " for parameter " << p_name;

                switch (strategy_.reduce_) {
                  case BuildStrategy::ReduceStrategy::kReduce:
                    cur_device_id = GetAppropriateDeviceID({g_name});
                    CreateReduceOp(&result, g_name, cur_device_id);
                    sharded_var_device.emplace(g_name, cur_device_id);
                    if (!is_dist_train) {
                      bcast_var_name_set[cur_device_id].emplace(p_name);
                    }
                    break;
                  case BuildStrategy::ReduceStrategy::kAllReduce:
                    if (IsSparseGradient(g_name)) {
                      CreateReduceOp(&result, g_name, 0);
                      CreateBroadcastOp(&result, g_name, 0);
                    } else {
                      InsertAllReduceOp(&result, g_name);
                    }
                    break;
                  default:
                    LOG(FATAL) << "Unknown reduce strategy.";
                    break;
                }
              }
            } catch (const boost::bad_get &e) {
            }
          }
        }
      }
    }
  }
  bool use_gpu = false;
#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32)
  use_gpu = nccl_ctxs_ != nullptr;
#endif

  // Principles for inserting broadcast operators:
  // 1. Broadcast optimized parameters in the Reduce strategy;
  // 2. No need to broadcast optimized parameters in the AllReduce strategy,
  //    because the optimization sub-graph runs on every GPU;
  // 3. Always broadcast received parameters in distributed training.
  if ((use_gpu &&
       strategy_.reduce_ == BuildStrategy::ReduceStrategy::kReduce) ||
      is_dist_train) {
    if (strategy_.fuse_broadcast_op_) {
      CreateFusedBroadcastOp(&result, bcast_var_name_set);
    } else {
      for (size_t dev_id = 0; dev_id < bcast_var_name_set.size(); ++dev_id) {
        auto &to_bcast_set = bcast_var_name_set[dev_id];
        for (auto &bcast_name : to_bcast_set) {
          CreateBroadcastOp(&result, bcast_name, dev_id);
        }
      }
    }
  }
  /*
  Dependency graph has been constructed. However, there are still data
  hazards that need to be handled.
  */
  PolishGraphToSupportDataHazards(&result);

  /*
   * Only variables should be the leaves of graph.
   */
  AddOutputToLeafOps(&result);
  result.Erase<GraphOps>(kGraphOps);
  return graph;
}

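// A gradient held in SELECTED_ROWS format is sparse; sparse gradients are
// reduced onto a single device and broadcast back instead of all-reduced.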
bool MultiDevSSAGraphBuilder::IsSparseGradient(const std::string &og) const {
  PADDLE_ENFORCE(all_vars_.count(og) != 0);
  if (all_vars_.at(og)->GetType() == proto::VarType::SELECTED_ROWS) {
    return true;
  }
  return false;
}

void MultiDevSSAGraphBuilder::SetCommunicationContext(
    OpHandleBase *op_handle, const platform::Place &p) const {
#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32)
  if (nccl_ctxs_ == nullptr) {
    op_handle->SetDeviceContext(p,
                                platform::DeviceContextPool::Instance().Get(p));
  }
#else
  op_handle->SetDeviceContext(p,
                              platform::DeviceContextPool::Instance().Get(p));
#endif
}

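// Broadcast the latest version of `p_name` from `src_dev_id`: the op
// consumes that version as input and emits a new version on every place.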
void MultiDevSSAGraphBuilder::CreateBroadcastOp(ir::Graph *result,
                                                const std::string &p_name,
                                                size_t src_dev_id) const {
#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32)
  auto *op_handle = new BroadcastOpHandle(
      result->CreateEmptyNode("broadcast", ir::Node::Type::kOperation),
      local_scopes_, places_, nccl_ctxs_);
#else
  auto *op_handle = new BroadcastOpHandle(
      result->CreateEmptyNode("broadcast", ir::Node::Type::kOperation),
      local_scopes_, places_);
#endif
  result->Get<GraphOps>(kGraphOps).emplace_back(op_handle);

  auto *in =
      result->Get<GraphVars>(kGraphVars).at(src_dev_id).at(p_name).back();
  op_handle->AddInput(in);

  for (size_t i = 0; i < places_.size(); ++i) {
    auto &p = places_[i];
    SetCommunicationContext(op_handle, p);
    auto &vars = result->Get<GraphVars>(kGraphVars).at(i).at(p_name);
    auto *out_var = new VarHandle(
        result->CreateEmptyNode(p_name, ir::Node::Type::kVariable), vars.size(),
        i, p_name, p);
    vars.emplace_back(out_var);
    op_handle->AddOutput(out_var);
  }
}

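// Emit a single fused op that performs all queued broadcasts at once: one
// input per broadcast variable on its source device, outputs on every place.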
void MultiDevSSAGraphBuilder::CreateFusedBroadcastOp(
    ir::Graph *result,
    const std::vector<std::unordered_set<std::string>> &bcast_varnames) const {
#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32)
  auto *op_handle = new FusedBroadcastOpHandle(
      result->CreateEmptyNode("fused_broadcast", ir::Node::Type::kOperation),
      local_scopes_, places_, nccl_ctxs_);
#else
  auto *op_handle = new FusedBroadcastOpHandle(
      result->CreateEmptyNode("fused_broadcast", ir::Node::Type::kOperation),
      local_scopes_, places_);
#endif
  result->Get<GraphOps>(kGraphOps).emplace_back(op_handle);

  for (size_t i = 0; i < places_.size(); ++i) {
    auto &p = places_[i];
    SetCommunicationContext(op_handle, p);
  }

  for (size_t dev_id = 0; dev_id < bcast_varnames.size(); ++dev_id) {
    for (auto &p_name : bcast_varnames[dev_id]) {
      auto *in =
          result->Get<GraphVars>(kGraphVars).at(dev_id).at(p_name).back();
      op_handle->AddInput(in);
      for (size_t out_dev_id = 0; out_dev_id < places_.size(); ++out_dev_id) {
        auto &p = places_[out_dev_id];
        auto &vars =
            result->Get<GraphVars>(kGraphVars).at(out_dev_id).at(p_name);
        auto *out_var = new VarHandle(
            result->CreateEmptyNode(p_name, ir::Node::Type::kVariable),
            vars.size(), out_dev_id, p_name, p);
        vars.emplace_back(out_var);
        op_handle->AddOutput(out_var);
      }
    }
  }
}

void MultiDevSSAGraphBuilder::CreateComputationalOp(ir::Graph *result,
                                                    ir::Node *node,
                                                    int dev_id) const {
  result->Get<GraphOps>(kGraphOps).emplace_back(
      new ComputationOpHandle(result->CreateOpNode(node->Op()),
                              local_scopes_[dev_id], places_[dev_id], dev_id));
  CreateOpHandleIOs(result, node, dev_id);
}

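// All-reduce gradient `og` across places: the op consumes the latest version
// on each device and emits a new version on each device.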
void MultiDevSSAGraphBuilder::InsertAllReduceOp(ir::Graph *result,
                                                const std::string &og) const {
#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32)
  result->Get<GraphOps>(kGraphOps).emplace_back(new AllReduceOpHandle(
      result->CreateEmptyNode("allreduce", ir::Node::Type::kOperation),
      local_scopes_, places_, nccl_ctxs_));
#else
  result->Get<GraphOps>(kGraphOps).emplace_back(new AllReduceOpHandle(
      result->CreateEmptyNode("allreduce", ir::Node::Type::kOperation),
      local_scopes_, places_));
#endif
  auto *op_handle = result->Get<GraphOps>(kGraphOps).back();

  for (size_t i = 0; i < places_.size(); ++i) {
    auto &p = places_[i];
    SetCommunicationContext(op_handle, p);
    auto &vars = result->Get<GraphVars>(kGraphVars)[i][og];
    PADDLE_ENFORCE(!vars.empty());
    auto &prev_grad = vars.back();
    op_handle->AddInput(prev_grad);

    auto var =
        new VarHandle(result->CreateEmptyNode(og, ir::Node::Type::kVariable),
                      vars.size(), i, og, p);
    vars.emplace_back(var);
    op_handle->AddOutput(var);
  }
}

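// Rebalance the outputs of the replicated "read" ops so that every place
// still receives a batch when the readers return uneven amounts of data.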
void MultiDevSSAGraphBuilder::InsertDataBalanceOp(
    ir::Graph *result, const std::vector<std::string> &datas) const {
#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32)
  result->Get<GraphOps>(kGraphOps).emplace_back(new DataBalanceOpHandle(
      result->CreateEmptyNode("data_balance", ir::Node::Type::kOperation),
      local_scopes_, places_, nccl_ctxs_));
#else
  result->Get<GraphOps>(kGraphOps).emplace_back(new DataBalanceOpHandle(
      result->CreateEmptyNode("data_balance", ir::Node::Type::kOperation),
      local_scopes_, places_));
#endif
  auto *op_handle = result->Get<GraphOps>(kGraphOps).back();
  for (size_t i = 0; i < places_.size(); ++i) {
    auto &p = places_[i];
    SetCommunicationContext(op_handle, p);
    for (const std::string &d_name : datas) {
      auto &vars = result->Get<GraphVars>(kGraphVars)[i][d_name];
      PADDLE_ENFORCE(!vars.empty());
      op_handle->AddInput(vars.back());
      auto var = new VarHandle(
          result->CreateEmptyNode(d_name, ir::Node::Type::kVariable),
          vars.size(), i, d_name, p);
      vars.emplace_back(var);
      op_handle->AddOutput(var);
    }
  }
}

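// Decide the single device an op must run on. Only optimizer ops under the
// Reduce strategy are pinned, to the device of their sharded gradient; any
// other op returns -1 and runs on all places.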
int MultiDevSSAGraphBuilder::GetOpDeviceID(
    const ir::Graph &graph, ir::Node *node,
    const std::unordered_map<std::string, int> &sharded_var_device) const {
  if (strategy_.reduce_ != BuildStrategy::ReduceStrategy::kReduce) {
    return -1;
  }
  int op_role = boost::get<int>(
      node->Op()->GetAttr(framework::OpProtoAndCheckerMaker::OpRoleAttrName()));
  if (op_role != static_cast<int>(framework::OpRole::kOptimize)) {
    return -1;
  }
  auto param_grad = boost::get<std::vector<std::string>>(
      node->Op()->GetAttr(OpProtoAndCheckerMaker::OpRoleVarAttrName()));

  PADDLE_ENFORCE_EQ(param_grad.size(), 2U);
  int dev_id = GetVarDeviceID(graph, param_grad[1], sharded_var_device);
  PADDLE_ENFORCE_NE(dev_id, -1, "dev_id should not be -1.[%s, %s, %s]",
                    node->Op()->Type(), param_grad[0], param_grad[1]);
  return dev_id;
}

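// Look up the device a sharded variable was assigned to, falling back to the
// base name when `varname` carries the kNewGradSuffix. Returns -1 when the
// variable is not sharded.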
int MultiDevSSAGraphBuilder::GetVarDeviceID(
    const ir::Graph &graph, const std::string &varname,
    const std::unordered_map<std::string, int> &sharded_var_device) const {
  auto got = sharded_var_device.find(varname);
  if (got == sharded_var_device.end()) {
    auto pos = varname.find(framework::kNewGradSuffix);
    if (pos != std::string::npos) {
      got = sharded_var_device.find(varname.substr(0, pos));
    }
  }
  return got == sharded_var_device.end() ? -1 : got->second;
}

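// Insert one ScaleLossGradOpHandle per place to create the initial loss
// gradient, scaled by 1 / num_parallel_devices.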
void MultiDevSSAGraphBuilder::CreateScaleLossGradOp(
    ir::Graph *result, const std::string &loss_grad_name,
    ir::Node *out_var_node, proto::VarType::Type dtype) const {
  size_t num_parallel_devices = Get<size_t>(kNumParallelDevices);
  for (size_t i = 0; i < places_.size(); ++i) {
    // Insert ScaleCost OpHandle
    auto *dev_ctx = platform::DeviceContextPool::Instance().Get(places_[i]);
    auto *op_handle = new ScaleLossGradOpHandle(
        result->CreateEmptyNode("scale_loss_grad", ir::Node::Type::kOperation),
        num_parallel_devices, local_scopes_[i], places_[i], dev_ctx, dtype);
    result->Get<GraphOps>(kGraphOps).emplace_back(op_handle);

    // FIXME: Currently ScaleLossGradOp only uses device_count as scale
    // factor. So it does not depend on any other operators.
    // VarHandle *loss = GetVarHandle(loss_var_name, place);
    // loss->pending_ops_.emplace_back(op_handle);
    // op_handle->inputs_.emplace_back(loss);

    CreateOpOutput(result, op_handle,
                   result->CreateVarNode(out_var_node->Var()), places_[i], i);
  }
}

void MultiDevSSAGraphBuilder::CreateComputationalOps(ir::Graph *result,
                                                     ir::Node *node,
                                                     size_t num_places) const {
  for (size_t scope_idx = 0; scope_idx < num_places; ++scope_idx) {
    auto p = places_[scope_idx];
    auto s = local_scopes_[scope_idx];
    result->Get<GraphOps>(kGraphOps).emplace_back(new ComputationOpHandle(
        result->CreateOpNode(node->Op()), s, p, scope_idx));
    CreateOpHandleIOs(result, node, scope_idx);
  }
}

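// Reduce gradient `og` from all places onto `dst_dev_id`: the latest version
// on every device is consumed and a new version is created on the
// destination. Returns the new variable handle.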
VarHandle *MultiDevSSAGraphBuilder::CreateReduceOp(ir::Graph *result,
                                                   const std::string &og,
                                                   int dst_dev_id) const {
#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32)
  result->Get<GraphOps>(kGraphOps).emplace_back(new ReduceOpHandle(
      result->CreateEmptyNode("reduce", ir::Node::Type::kOperation),
      local_scopes_, places_, nccl_ctxs_));
#else
  result->Get<GraphOps>(kGraphOps).emplace_back(new ReduceOpHandle(
      result->CreateEmptyNode("reduce", ir::Node::Type::kOperation),
      local_scopes_, places_));
#endif
  auto *op_handle = result->Get<GraphOps>(kGraphOps).back();

  for (size_t i = 0; i < places_.size(); ++i) {
    auto &p = places_[i];
    SetCommunicationContext(op_handle, p);
    auto &vars = result->Get<GraphVars>(kGraphVars)[i][og];
    PADDLE_ENFORCE(!vars.empty());
    auto &prev_grad = vars.back();
    op_handle->AddInput(prev_grad);
  }
  auto &vars = result->Get<GraphVars>(kGraphVars)[dst_dev_id][og];
  auto var =
      new VarHandle(result->CreateEmptyNode(og, ir::Node::Type::kVariable),
                    vars.size(), dst_dev_id, og, places_[dst_dev_id]);
  vars.emplace_back(var);
  op_handle->AddOutput(var);
  return var;
}

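// Pin a distributed-training op (split_* / concat) to one device and record
// the device of its input/output variables in sharded_var_device.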
int MultiDevSSAGraphBuilder::CreateDistTrainOp(
    ir::Graph *result, ir::Node *node,
    std::unordered_map<std::string, int> *sharded_var_device) const {
  int op_dev_id = -1;
  std::vector<std::string> input_var_names;
  std::vector<std::string> output_var_names;
  for (ir::Node *input : node->inputs) {
    input_var_names.push_back(input->Name());
  }
  for (ir::Node *output : node->outputs) {
    output_var_names.push_back(output->Name());
  }

  if (node->Op()->Type() == "split_byref" ||
      node->Op()->Type() == "split_selected_rows" ||
      node->Op()->Type() == "split_ids") {
    // TODO(paddle-dev): getting the first var is not safe.
    op_dev_id =
        GetVarDeviceID(*result, input_var_names[0], *sharded_var_device);
    if (strategy_.reduce_ == BuildStrategy::ReduceStrategy::kAllReduce) {
      op_dev_id = GetAppropriateDeviceID(input_var_names);
      for (auto &varname : input_var_names) {
        sharded_var_device->emplace(varname, op_dev_id);
      }
    }
    for (auto &varname : output_var_names) {
      sharded_var_device->emplace(varname, op_dev_id);
    }
  } else if (node->Op()->Type() == "concat") {
    op_dev_id =
        GetVarDeviceID(*result, input_var_names[0], *sharded_var_device);
    for (auto &varname : output_var_names) {
      sharded_var_device->emplace(varname, op_dev_id);
    }
  } else {
    LOG(ERROR) << "got unexpected dist op: " << node->Op()->Type();
    PADDLE_THROW(
        "the distributed training related op should be in [split_byref, "
        "split_selected_rows, split_ids, concat].");
  }

  PADDLE_ENFORCE(op_dev_id != -1,
                 "can not find right place for distributed op: %s",
                 node->Op()->Type());

  CreateComputationalOp(result, node, op_dev_id);
  return op_dev_id;
}

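// Add, as inputs to the last created op, the latest version of each named
// input variable found on every place.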
void SetOpInputsAllPlaces(ir::Graph *result, ir::Node *node, int num_places) {
  auto *op_handle = result->Get<GraphOps>(kGraphOps).back();
  for (ir::Node *input : node->inputs) {
    VarHandle *var = nullptr;
    for (int place_offset = 0; place_offset < num_places; ++place_offset) {
      auto &var_holders = result->Get<GraphVars>(kGraphVars)[place_offset];
      auto &var_holder = var_holders[input->Name()];
      if (!var_holder.empty()) {
        var = *var_holder.rbegin();
        op_handle->AddInput(var);
      }
    }
  }
}

// Create RPC-related op handles that connect their input and output ops.
int MultiDevSSAGraphBuilder::CreateRPCOp(
    ir::Graph *result, ir::Node *node,
    std::unordered_map<std::string, int> *sharded_var_device) const {
  int op_dev_id = -1;
  if (node->Op()->Type() == "send") {
    // TODO(paddle-dev): getting the first var is not safe.
    op_dev_id =
        GetVarDeviceID(*result, node->inputs[0]->Name(), *sharded_var_device);
    PADDLE_ENFORCE(!ir::IsControlDepVar(*node->inputs[0]),
                   "This hack no longer holds, please fix.");
    // a variable name that contains ".block" means it was split by the
    // split_byref op
    if (strategy_.reduce_ == BuildStrategy::ReduceStrategy::kAllReduce &&
        node->inputs[0]->Name().find(".block") == std::string::npos) {
      std::vector<std::string> input_var_names;
      for (ir::Node *n : node->inputs) {
        input_var_names.push_back(n->Name());
      }
      auto send_param_grad = boost::get<std::vector<std::string>>(
          node->Op()->GetAttr(OpProtoAndCheckerMaker::OpRoleVarAttrName()));
      PADDLE_ENFORCE_EQ(send_param_grad.size(), 2U);
      op_dev_id = GetAppropriateDeviceID({send_param_grad[1]});
      VLOG(10) << "send grad " << input_var_names[0] << " origin "
               << send_param_grad[1] << " place: " << op_dev_id;
      for (auto &varname : input_var_names) {
        sharded_var_device->emplace(varname, op_dev_id);
      }
      sharded_var_device->emplace(send_param_grad[1], op_dev_id);
    }
  } else if (node->Op()->Type() == "recv") {
    std::vector<std::string> output_var_names;
    for (ir::Node *n : node->outputs) {
      output_var_names.push_back(n->Name());
    }
    auto recv_param_grad = boost::get<std::vector<std::string>>(
        node->Op()->GetAttr(OpProtoAndCheckerMaker::OpRoleVarAttrName()));
    if (recv_param_grad.size() == 2U) {
      op_dev_id =
          GetVarDeviceID(*result, recv_param_grad[1], *sharded_var_device);
      VLOG(10) << "recv param " << recv_param_grad[0]
               << " get grad place: " << recv_param_grad[1]
               << " place: " << op_dev_id;
    } else {
      op_dev_id = GetAppropriateDeviceID(output_var_names);
    }
    for (auto &varname : output_var_names) {
      sharded_var_device->emplace(varname, op_dev_id);
    }
  } else {
    // send_barrier, fetch_barrier will run on place 0;
    op_dev_id = 0;
  }

  PADDLE_ENFORCE(op_dev_id != -1, "can not find the right place for rpc op: %s",
                 node->Op()->Type());
  result->Get<GraphOps>(kGraphOps).emplace_back(new RPCOpHandle(
      result->CreateOpNode(node->Op()), *node->Op(), local_scopes_[op_dev_id],
      node->Op()->Type(), places_[op_dev_id]));

  if (node->Op()->Type() == "send") {
    CreateOpHandleIOs(result, node, op_dev_id);
  } else {
    // send_barrier, recv and fetch_barrier's inputs are dependency vars;
    // gather them from all places
    auto p = places_[op_dev_id];
    auto *op_handle = result->Get<GraphOps>(kGraphOps).back();
    op_handle->SetDeviceContext(p,
                                platform::DeviceContextPool::Instance().Get(p));

    SetOpInputsAllPlaces(result, node, places_.size());
    for (ir::Node *output : node->outputs) {
      int outvar_dev_id = op_dev_id;
      if (node->Op()->Type() == "fetch_barrier") {
        outvar_dev_id =
            GetVarDeviceID(*result, output->Name(), *sharded_var_device);
        PADDLE_ENFORCE_NE(outvar_dev_id, -1, "output name %s", output->Name());
      }
      p = places_[outvar_dev_id];
      ir::Node *new_node = nullptr;
      if (output->Var()) {
        new_node = result->CreateVarNode(output->Var());
      } else {
        new_node =
            result->CreateEmptyNode(output->Name(), ir::Node::Type::kVariable);
      }
      CreateOpOutput(result, op_handle, new_node, p, outvar_dev_id);
    }
  }
  return op_dev_id;
}

bool MultiDevSSAGraphBuilder::IsScaleLossOp(ir::Node *node) const {
  return boost::get<int>(
             node->Op()->GetAttr(OpProtoAndCheckerMaker::OpRoleAttrName())) ==
             (static_cast<int>(OpRole::kBackward) |
              static_cast<int>(OpRole::kLoss)) &&
         !loss_var_name_.empty();  // If loss_var is empty, this is test mode.
}
}  // namespace details
}  // namespace framework
}  // namespace paddle

REGISTER_PASS(multi_devices_pass,
              paddle::framework::details::MultiDevSSAGraphBuilder)
    .RequirePassAttr(paddle::framework::details::kLossVarName)
    .RequirePassAttr(paddle::framework::details::kPlaces)
    .RequirePassAttr(paddle::framework::details::kLocalScopes)
    .RequirePassAttr(paddle::framework::details::kStrategy)
    .RequirePassAttr(paddle::framework::details::kNumParallelDevices);