//   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <algorithm>
#include <fstream>
#include <string>
#include <utility>
#include <vector>

#include "paddle/fluid/framework/details/all_reduce_op_handle.h"
#include "paddle/fluid/framework/details/broadcast_op_handle.h"
#include "paddle/fluid/framework/details/computation_op_handle.h"
#include "paddle/fluid/framework/details/data_balance_op_handle.h"
#include "paddle/fluid/framework/details/fused_broadcast_op_handle.h"
#include "paddle/fluid/framework/details/multi_devices_graph_pass.h"
#include "paddle/fluid/framework/details/reduce_op_handle.h"
#include "paddle/fluid/framework/details/rpc_op_handle.h"
#include "paddle/fluid/framework/details/scale_loss_grad_op_handle.h"
#include "paddle/fluid/framework/ir/graph_helper.h"
#include "paddle/fluid/framework/ir/node.h"
#include "paddle/fluid/framework/op_info.h"
#include "paddle/fluid/framework/scope.h"

namespace paddle {
namespace framework {
namespace details {

namespace {
// TODO(panyx0718): Clean this up as well.
// kGraphOps holds all operators. NOTE that even though we use a vector here,
// the operators are unordered.
typedef std::vector<OpHandleBase *> GraphOps;
const char kGraphOps[] = "ops";

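// Adds explicit control-dependency edges so that every op reading version N
// of a variable finishes before the op that writes version N + 1, i.e. it
// resolves write-after-read hazards between successive SSA versions.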
void PolishGraphToSupportDataHazards(ir::Graph *graph) {
  for (auto &var_map : graph->Get<GraphVars>(kGraphVars)) {
    for (auto &name_pair : var_map) {
      if (name_pair.second.size() <= 1) {
        continue;
      }
      auto it_new = name_pair.second.rbegin();
      auto it_old = name_pair.second.rbegin();
      ++it_old;
      for (; it_old != name_pair.second.rend(); it_new = it_old, ++it_old) {
        OpHandleBase *write_op = (*it_new)->GeneratedOp();
        const auto &read_ops = (*it_old)->PendingOps();

        for (auto *read_op : read_ops) {
          // Manually add a dependency var from read_op to write_op;
          if (read_op == write_op) {
            // The read op and the write op are the same op.
            continue;
          }
          bool has_dep = false;
          for (auto *r_out : read_op->Outputs()) {
            for (auto *w_in : write_op->Inputs()) {
              if (r_out->Node() == w_in->Node()) {
                has_dep = true;
                break;
              }
            }
          }
          if (has_dep) continue;

          auto *dep_var = new DummyVarHandle(graph->CreateControlDepVar());
          read_op->AddOutput(dep_var);
          write_op->AddInput(dep_var);
          graph->Get<GraphDepVars>(kGraphDepVars).emplace(dep_var);
        }
      }
    }
  }
}

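// Returns the newest VarHandle of `node` on `place`; if the variable has not
// been seen on that place yet, version 0 is created first.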
VarHandle *CreateOrGetLatestVarHandle(ir::Graph *graph, ir::Node *node,
                                      const platform::Place &place,
                                      size_t place_offset) {
  auto &var_holders = graph->Get<GraphVars>(kGraphVars)[place_offset];
  auto &var_holder = var_holders[node->Name()];
  VarHandle *var = nullptr;
  if (var_holder.empty()) {
    if (node->Var()) {
      var = new VarHandle(graph->CreateVarNode(node->Var()), 0, place_offset,
                          node->Name(), place);
    } else {
      var = new VarHandle(
          graph->CreateEmptyNode(node->Name(), ir::Node::Type::kVariable), 0,
          place_offset, node->Name(), place);
    }
    var_holder.emplace_back(var);
  } else {
    var = *var_holder.rbegin();
  }
  return var;
}

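// Appends a new version of the output variable on `place` and registers it as
// an output of `op_handle`.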
void CreateOpOutput(ir::Graph *graph, OpHandleBase *op_handle,
                    ir::Node *new_node, const platform::Place &place,
                    size_t place_offset) {
  auto &vars =
      graph->Get<GraphVars>(kGraphVars)[place_offset][new_node->Name()];
  size_t version = vars.size();
  auto var =
      new VarHandle(new_node, version, place_offset, new_node->Name(), place);
  vars.emplace_back(var);
  op_handle->AddOutput(var);
}

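// Gives every op without outputs a dummy output, so that variables remain the
// only leaves of the graph.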
void AddOutputToLeafOps(ir::Graph *graph) {
  for (auto &op : graph->Get<GraphOps>(kGraphOps)) {
    if (!op->Outputs().empty()) {
      continue;
    }
    auto *dummy_leaf = new DummyVarHandle(graph->CreateControlDepVar());
    graph->Get<GraphDepVars>(kGraphDepVars).emplace(dummy_leaf);
    op->AddOutput(dummy_leaf);
  }
}
}  // namespace
static const char kLossVarName[] = "loss_var_name";
static const char kPlaces[] = "places";
static const char kParams[] = "params";
static const char kLocalScopes[] = "local_scopes";
static const char kStrategy[] = "strategy";
static const char kNumTrainers[] = "num_trainers";
static const char kNumLossScaled[] = "num_loss_scaled";

void MultiDevSSAGraphBuilder::Init() const {
  all_vars_.clear();
  balance_vars_.clear();

  loss_var_name_ = Get<const std::string>(kLossVarName);
  places_ = Get<const std::vector<platform::Place>>(kPlaces);
  local_scopes_ = Get<const std::vector<Scope *>>(kLocalScopes);
  strategy_ = Get<const BuildStrategy>(kStrategy);
#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32)
  nccl_ctxs_ = &Get<platform::NCCLContextMap>("nccl_ctxs");
#endif

  for (auto &p : Get<const std::unordered_set<std::string>>(kParams)) {
    grad_names_.insert(GradVarName(p));
  }
  balance_vars_.resize(places_.size(), 0);
  if (strategy_.enable_data_balance_ && places_.size() == 1) {
    LOG(WARNING) << "There is no need to enable data balance when there is "
                    "only one place. enable_data_balance is set to False.";
    strategy_.enable_data_balance_ = false;
  }
}

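// Wires the most recently created op handle to its input/output VarHandles on
// the device selected by place_id.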
void MultiDevSSAGraphBuilder::CreateOpHandleIOs(ir::Graph *result,
                                                ir::Node *node,
                                                size_t place_id) const {
  auto p = places_[place_id];
  auto *op_handle = result->Get<GraphOps>(kGraphOps).back();
  op_handle->SetDeviceContext(p,
                              platform::DeviceContextPool::Instance().Get(p));

  for (ir::Node *input : node->inputs) {
    VarHandle *var = CreateOrGetLatestVarHandle(result, input, p, place_id);
    op_handle->AddInput(var);
  }

  for (ir::Node *output : node->outputs) {
    ir::Node *new_node = nullptr;
    if (output->Var()) {
      new_node = result->CreateVarNode(output->Var());
    } else {
      new_node =
          result->CreateEmptyNode(output->Name(), ir::Node::Type::kVariable);
    }
    CreateOpOutput(result, op_handle, new_node, p, place_id);
  }
}

std::vector<std::string> MultiDevSSAGraphBuilder::FindDistTrainSendVars(
    const std::vector<ir::Node *> &nodes) const {
  std::vector<std::string> send_vars;
  // since parameters are all in block 0,
  // it's enough to only scan send ops in block 0
  for (auto &node : nodes) {
    OpDesc *op = node->Op();
    // TODO(Yancey1989): use a graceful method to find send op,
    // instead of the hard-coded string
    if (op->Type() == "send") {
      auto op_vars = op->InputArgumentNames();
      send_vars.reserve(send_vars.size() +
                        std::distance(op_vars.begin(), op_vars.end()));
      send_vars.insert(send_vars.end(), op_vars.begin(), op_vars.end());
    }
  }
  return send_vars;
}

std::vector<std::string> MultiDevSSAGraphBuilder::FindDistTrainRecvVars(
    const std::vector<ir::Node *> &nodes) const {
  std::vector<std::string> recv_vars;
  for (auto &node : nodes) {
    OpDesc *op = node->Op();
    // TODO(Yancey1989): use a graceful method to find recv op,
    // instead of the hard-coded string
    if (op->Type() == "recv") {
      auto op_vars = op->OutputArgumentNames();
      recv_vars.reserve(recv_vars.size() +
                        std::distance(op_vars.begin(), op_vars.end()));
      recv_vars.insert(recv_vars.end(), op_vars.begin(), op_vars.end());
    }
  }
  return recv_vars;
}

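// Greedy load balancing: returns the device that currently holds the fewest
// elements and charges it with the total numel of var_names.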
size_t MultiDevSSAGraphBuilder::GetAppropriateDeviceID(
    const std::vector<std::string> &var_names) const {
  int64_t numel_sum = 0;
  for (auto var_name : var_names) {
    if (all_vars_.find(var_name) == all_vars_.end()) continue;
    auto var_desc = all_vars_.at(var_name);
    PADDLE_ENFORCE_NOT_NULL(var_desc);
    auto dim = framework::make_ddim(var_desc->GetShape());
    int64_t numel = framework::product(dim);
    PADDLE_ENFORCE_GT(numel, 0);
    numel_sum += numel;
  }

  auto smallest =
      std::min_element(std::begin(balance_vars_), std::end(balance_vars_));
  size_t dev_id =
      static_cast<size_t>(std::distance(std::begin(balance_vars_), smallest));
  balance_vars_[dev_id] += numel_sum;
  return dev_id;
}

// Topologically sort the graph nodes from inputs to outputs.
// Since SSAGraphBuilder depends on forward/backward nodes to assign devices
// to parameters/gradients before optimizer ops, a plain topological sort is
// insufficient (some optimizer ops might not depend on any nodes), so we
// manually move all optimizer nodes after the last backward node.
// However, the assumption by SSAGraphBuilder should be relaxed in the future.
std::vector<ir::Node *> SortOpsAndDelayOptimizeOp(const ir::Graph &graph) {
  std::vector<ir::Node *> ret = ir::TopologySortOperations(graph);
  size_t last_backward = 0;
  for (size_t i = 0; i < ret.size(); ++i) {
    if (boost::get<int>(
            ret[i]->Op()->GetAttr(OpProtoAndCheckerMaker::OpRoleAttrName())) ==
        static_cast<int>(OpRole::kBackward)) {
      last_backward = i;
    }
  }

  std::vector<ir::Node *> optimize_ops;
  std::vector<ir::Node *> sorted_ret;
  for (size_t i = 0; i < ret.size(); ++i) {
    if (i < last_backward) {
      if (static_cast<bool>(boost::get<int>(ret[i]->Op()->GetAttr(
                                OpProtoAndCheckerMaker::OpRoleAttrName())) &
                            static_cast<int>(OpRole::kOptimize))) {
        optimize_ops.push_back(ret[i]);
      } else {
        sorted_ret.push_back(ret[i]);
      }
    } else if (i == last_backward) {
      sorted_ret.push_back(ret[i]);
      // Verify that no operations before optimize ops depend on optimize ops.
      std::unordered_set<ir::Node *> optimize_set(optimize_ops.begin(),
                                                  optimize_ops.end());
      for (ir::Node *n : sorted_ret) {
        for (ir::Node *in : n->inputs) {
          for (ir::Node *pre_n : in->inputs) {
            PADDLE_ENFORCE(optimize_set.find(pre_n) == optimize_set.end(),
                           "optimize operations cannot be depended by forward "
                           "or backward node %s -> %s",
                           pre_n->Name(), n->Name());
          }
        }
      }
      sorted_ret.insert(sorted_ret.end(), optimize_ops.begin(),
                        optimize_ops.end());
    } else {
      sorted_ret.push_back(ret[i]);
    }
  }
  return sorted_ret;
}

std::unique_ptr<ir::Graph> MultiDevSSAGraphBuilder::ApplyImpl(
    std::unique_ptr<ir::Graph> graph) const {
  Init();
  // Give the topology sort order and rebuild the graph structure.
  std::vector<ir::Node *> sorted_ops = SortOpsAndDelayOptimizeOp(*graph);
  auto nodes = graph->ReleaseNodes();
  ir::Graph &result = *graph;

  int num_trainers = Get<int>(kNumTrainers);

  for (auto &node : nodes) {
    if (node->IsVar() && node->Var()) {
      all_vars_.emplace(node->Name(), node->Var());
    }
  }
  std::unordered_set<std::string> og_has_been_broadcast;

  // We cannot invoke resize here; it triggers a bug in GCC 4.8.
  result.Set(kGraphVars, new GraphVars(places_.size()));
  result.Set(kGraphDepVars, new GraphDepVars);
  result.Set(kGraphOps, new GraphOps);

  // Find send/recv vars so that we can place the distributed-training-related
  // ops on place 0.
  auto send_vars = FindDistTrainSendVars(sorted_ops);
  auto recv_vars = FindDistTrainRecvVars(sorted_ops);

  std::vector<std::unordered_set<std::string>> bcast_var_name_set;
  bcast_var_name_set.resize(places_.size());

  size_t cur_device_id = 0;
  bool is_forwarding = true;
  bool is_dist_train = false;

  std::unordered_map<std::string, int> sharded_var_device;

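  // Translate every op node into op handles: RPC and distributed-training ops
  // are pinned to a single device, the loss-scaling op ends the forward
  // section, and the remaining ops are either pinned (Reduce strategy) or
  // replicated on every place, with collective ops inserted for gradients
  // during the backward section.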
  for (ir::Node *node : sorted_ops) {
    if (boost::get<int>(
            node->Op()->GetAttr(OpProtoAndCheckerMaker::OpRoleAttrName())) ==
        static_cast<int>(OpRole::kRPC)) {
      int op_dev_id = CreateRPCOp(&result, node, &sharded_var_device);
      PADDLE_ENFORCE(op_dev_id != -1,
                     "Can not schedule the RPC operator to the right place.");
      if (node->Op()->Type() == "recv") {
        auto recv_vars_attr =
            boost::get<std::vector<std::string>>(node->Op()->GetNullableAttr(
                OpProtoAndCheckerMaker::OpRoleVarAttrName()));
        PADDLE_ENFORCE(recv_vars_attr.size() == 2UL);  // [parameter, gradient]
        if (recv_vars_attr[0].find(".block") == std::string::npos) {
          bcast_var_name_set[op_dev_id].emplace(recv_vars_attr[0]);
        }
      }
      is_dist_train = true;
    } else if (boost::get<int>(node->Op()->GetAttr(
                   OpProtoAndCheckerMaker::OpRoleAttrName())) ==
               static_cast<int>(OpRole::kDist)) {
      int op_dev_id = CreateDistTrainOp(&result, node, &sharded_var_device);
      if (node->Op()->Type() == "concat") {
        auto origin_param_name = node->Op()->OutputArgumentNames()[0];
        bcast_var_name_set[op_dev_id].emplace(origin_param_name);
      }
    } else if (IsScaleLossOp(node)) {
      // user can customize loss@grad if not use_default_grad_scale_
      if (strategy_.gradient_scale_ !=
          BuildStrategy::GradientScaleStrategy::kCustomized) {
        // TODO(paddle-dev): Why is there no input for this op_handle?
        auto loss_grad_name = node->Op()->OutputArgumentNames()[0];
        CreateScaleLossGradOp(&result, loss_grad_name, node->outputs[0]);
      }
      // This assumes the backward generating code will ensure IsScaleLossOp
      // is true only for the op that scales the final scalar loss.
      // It also assumes backward op will always follow the forward op in
      // the block.
      is_forwarding = false;
    } else {
      int op_dev_id = GetOpDeviceID(result, node, sharded_var_device);
      if (op_dev_id != -1) {  // This op only runs on one specific device.
        CreateComputationalOp(&result, node, op_dev_id);
        for (ir::Node *n : node->outputs) {
          sharded_var_device.emplace(n->Name(), op_dev_id);
        }
      } else {
        // This op runs on all devices, and its output may contain parameters'
        // gradients.
        // TODO(paddle-dev): Why is the "read" op treated specially here?
        if (node->Op()->Type() == "read" && strategy_.enable_data_balance_) {
          node->Op()->SetAttr("throw_eof_exp", false);
          CreateComputationalOps(&result, node, places_.size());
          const auto &data_var_names = node->Op()->Output("Out");
          InsertDataBalanceOp(&result, data_var_names);
        } else {
          CreateComputationalOps(&result, node, places_.size());
        }

// Insert collective ops during backpropagation, and
// insert collective ops if the graph contains multiple places.
#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32)
        if (!is_forwarding &&
            (places_.size() > 1 || num_trainers > 1 ||
             (nccl_ctxs_ && nccl_ctxs_->contexts_.size() > 1))) {
#else
        if (!is_forwarding && (places_.size() > 1 || num_trainers > 1)) {
#endif
          // Currently, we assume that once a gradient is generated, it can be
          // broadcast, and each gradient is only broadcast once.
          if (static_cast<bool>(boost::get<int>(node->Op()->GetAttr(
                                    OpProtoAndCheckerMaker::OpRoleAttrName())) &
                                static_cast<int>(OpRole::kBackward))) {
            try {
              auto backward_vars = boost::get<std::vector<std::string>>(
                  node->Op()->GetNullableAttr(
                      OpProtoAndCheckerMaker::OpRoleVarAttrName()));

              PADDLE_ENFORCE_EQ(backward_vars.size() % 2, 0);

              for (size_t i = 0; i < backward_vars.size(); i += 2) {
                auto &p_name = backward_vars[i];
                auto &g_name = backward_vars[i + 1];
                VLOG(10) << "Bcast " << g_name << " for parameter " << p_name;

                switch (strategy_.reduce_) {
                  case BuildStrategy::ReduceStrategy::kReduce:
                    cur_device_id = GetAppropriateDeviceID({g_name});
                    CreateReduceOp(&result, g_name, cur_device_id);
                    sharded_var_device.emplace(g_name, cur_device_id);
                    if (!is_dist_train) {
                      bcast_var_name_set[cur_device_id].emplace(p_name);
                    }
                    break;
                  case BuildStrategy::ReduceStrategy::kAllReduce:
                    if (IsSparseGradient(g_name)) {
                      CreateReduceOp(&result, g_name, 0);
                      CreateBroadcastOp(&result, g_name, 0);
                    } else {
                      InsertAllReduceOp(&result, g_name);
                    }
                    break;
                  default:
                    LOG(FATAL) << "Unknown reduce strategy.";
                    break;
                }
              }
            } catch (boost::bad_get e) {
            }
          }
        }
      }
    }
  }
  bool use_gpu = false;
#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32)
  use_gpu = nccl_ctxs_ != nullptr;
#endif

  // Principles for inserting broadcast operators:
  // 1. Broadcast optimized parameters in the Reduce strategy;
  // 2. No need to broadcast optimized parameters in the AllReduce strategy,
  //    because the optimization sub-graph runs on every GPU;
  // 3. Always broadcast received parameters in distributed training.
  if ((use_gpu &&
       strategy_.reduce_ == BuildStrategy::ReduceStrategy::kReduce) ||
      is_dist_train) {
    if (strategy_.fuse_broadcast_op_) {
      CreateFusedBroadcastOp(&result, bcast_var_name_set);
    } else {
      for (size_t dev_id = 0; dev_id < bcast_var_name_set.size(); ++dev_id) {
        auto &to_bcast_set = bcast_var_name_set[dev_id];
        for (auto &bcast_name : to_bcast_set) {
          CreateBroadcastOp(&result, bcast_name, dev_id);
        }
      }
    }
  }
  /*
  The dependency graph has been constructed. However, there are still data
  hazards that need to be handled.
 */
  PolishGraphToSupportDataHazards(&result);

  /*
   * Only variables should be the leaves of the graph.
   */
  AddOutputToLeafOps(&result);
  result.Erase<GraphOps>(kGraphOps);
  return graph;
}

bool MultiDevSSAGraphBuilder::IsSparseGradient(const std::string &og) const {
  PADDLE_ENFORCE(all_vars_.count(og) != 0);
  if (all_vars_.at(og)->GetType() == proto::VarType::SELECTED_ROWS) {
    return true;
  }
  return false;
}

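// Falls back to the default device context for op_handle when no NCCL context
// map is available (and always in CPU-only builds).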
void MultiDevSSAGraphBuilder::SetCommunicationContext(
    OpHandleBase *op_handle, const platform::Place &p) const {
#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32)
  if (nccl_ctxs_ == nullptr) {
    op_handle->SetDeviceContext(p,
                                platform::DeviceContextPool::Instance().Get(p));
  }
#else
  op_handle->SetDeviceContext(p,
                              platform::DeviceContextPool::Instance().Get(p));
#endif
}

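// Builds a BroadcastOpHandle that consumes the latest version of p_name on
// src_dev_id and produces a new version of p_name on every place.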
void MultiDevSSAGraphBuilder::CreateBroadcastOp(ir::Graph *result,
                                                const std::string &p_name,
                                                size_t src_dev_id) const {
#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32)
  auto *op_handle = new BroadcastOpHandle(
      result->CreateEmptyNode("broadcast", ir::Node::Type::kOperation),
      local_scopes_, places_, nccl_ctxs_);
#else
  auto *op_handle = new BroadcastOpHandle(
      result->CreateEmptyNode("broadcast", ir::Node::Type::kOperation),
      local_scopes_, places_);
#endif
  result->Get<GraphOps>(kGraphOps).emplace_back(op_handle);

  auto *in =
      result->Get<GraphVars>(kGraphVars).at(src_dev_id).at(p_name).back();
  op_handle->AddInput(in);

  for (size_t i = 0; i < places_.size(); ++i) {
    auto &p = places_[i];
    SetCommunicationContext(op_handle, p);
    auto &vars = result->Get<GraphVars>(kGraphVars).at(i).at(p_name);
    auto *out_var = new VarHandle(
        result->CreateEmptyNode(p_name, ir::Node::Type::kVariable), vars.size(),
        i, p_name, p);
    vars.emplace_back(out_var);
    op_handle->AddOutput(out_var);
  }
}

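// Fused counterpart of CreateBroadcastOp: a single FusedBroadcastOpHandle
// broadcasts every variable listed in bcast_varnames from its source device
// to all places.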
void MultiDevSSAGraphBuilder::CreateFusedBroadcastOp(
    ir::Graph *result,
    const std::vector<std::unordered_set<std::string>> &bcast_varnames) const {
#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32)
  auto *op_handle = new FusedBroadcastOpHandle(
      result->CreateEmptyNode("fused_broadcast", ir::Node::Type::kOperation),
      local_scopes_, places_, nccl_ctxs_);
#else
  auto *op_handle = new FusedBroadcastOpHandle(
      result->CreateEmptyNode("fused_broadcast", ir::Node::Type::kOperation),
      local_scopes_, places_);
#endif
  result->Get<GraphOps>(kGraphOps).emplace_back(op_handle);

  for (size_t i = 0; i < places_.size(); ++i) {
    auto &p = places_[i];
    SetCommunicationContext(op_handle, p);
  }

  for (size_t dev_id = 0; dev_id < bcast_varnames.size(); ++dev_id) {
    for (auto &p_name : bcast_varnames[dev_id]) {
      auto *in =
          result->Get<GraphVars>(kGraphVars).at(dev_id).at(p_name).back();
      op_handle->AddInput(in);
      for (size_t out_dev_id = 0; out_dev_id < places_.size(); ++out_dev_id) {
        auto &p = places_[out_dev_id];
        auto &vars =
            result->Get<GraphVars>(kGraphVars).at(out_dev_id).at(p_name);
        auto *out_var = new VarHandle(
            result->CreateEmptyNode(p_name, ir::Node::Type::kVariable),
            vars.size(), out_dev_id, p_name, p);
        vars.emplace_back(out_var);
        op_handle->AddOutput(out_var);
      }
    }
  }
}

void MultiDevSSAGraphBuilder::CreateComputationalOp(ir::Graph *result,
                                                    ir::Node *node,
                                                    int dev_id) const {
  result->Get<GraphOps>(kGraphOps).emplace_back(
      new ComputationOpHandle(result->CreateOpNode(node->Op()),
                              local_scopes_[dev_id], places_[dev_id], dev_id));
  CreateOpHandleIOs(result, node, dev_id);
}

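// Creates one AllReduceOpHandle for gradient og, reading the latest version
// on every place and emitting a new, reduced version on each of them.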
void MultiDevSSAGraphBuilder::InsertAllReduceOp(ir::Graph *result,
                                                const std::string &og) const {
#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32)
  result->Get<GraphOps>(kGraphOps).emplace_back(new AllReduceOpHandle(
      result->CreateEmptyNode("allreduce", ir::Node::Type::kOperation),
      local_scopes_, places_, nccl_ctxs_));
#else
  result->Get<GraphOps>(kGraphOps).emplace_back(new AllReduceOpHandle(
      result->CreateEmptyNode("allreduce", ir::Node::Type::kOperation),
      local_scopes_, places_));
#endif
  auto *op_handle = result->Get<GraphOps>(kGraphOps).back();

  for (size_t i = 0; i < places_.size(); ++i) {
    auto &p = places_[i];
    SetCommunicationContext(op_handle, p);
    auto &vars = result->Get<GraphVars>(kGraphVars)[i][og];
    PADDLE_ENFORCE(!vars.empty());
    auto &prev_grad = vars.back();
    op_handle->AddInput(prev_grad);

    auto var =
        new VarHandle(result->CreateEmptyNode(og, ir::Node::Type::kVariable),
                      vars.size(), i, og, p);
    vars.emplace_back(var);
    op_handle->AddOutput(var);
  }
}

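// Creates one DataBalanceOpHandle over the reader outputs in `datas`, taking
// the latest version of each variable on every place and producing a new
// version on each of them.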
void MultiDevSSAGraphBuilder::InsertDataBalanceOp(
    ir::Graph *result, const std::vector<std::string> &datas) const {
#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32)
  result->Get<GraphOps>(kGraphOps).emplace_back(new DataBalanceOpHandle(
      result->CreateEmptyNode("data_balance", ir::Node::Type::kOperation),
      local_scopes_, places_, nccl_ctxs_));
#else
  result->Get<GraphOps>(kGraphOps).emplace_back(new DataBalanceOpHandle(
      result->CreateEmptyNode("data_balance", ir::Node::Type::kOperation),
      local_scopes_, places_));
#endif
  auto *op_handle = result->Get<GraphOps>(kGraphOps).back();
  for (size_t i = 0; i < places_.size(); ++i) {
    auto &p = places_[i];
    SetCommunicationContext(op_handle, p);
    for (const std::string &d_name : datas) {
      auto &vars = result->Get<GraphVars>(kGraphVars)[i][d_name];
      PADDLE_ENFORCE(!vars.empty());
      op_handle->AddInput(vars.back());
      auto var = new VarHandle(
          result->CreateEmptyNode(d_name, ir::Node::Type::kVariable),
          vars.size(), i, d_name, p);
      vars.emplace_back(var);
      op_handle->AddOutput(var);
    }
  }
}

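// Under the Reduce strategy, an optimizer op must run on the device that owns
// its sharded gradient; returns that device id, or -1 when the op is not
// pinned to a single device.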
int MultiDevSSAGraphBuilder::GetOpDeviceID(
    const ir::Graph &graph, ir::Node *node,
    const std::unordered_map<std::string, int> &sharded_var_device) const {
  if (strategy_.reduce_ != BuildStrategy::ReduceStrategy::kReduce) {
    return -1;
  }
  int op_role = boost::get<int>(
      node->Op()->GetAttr(framework::OpProtoAndCheckerMaker::OpRoleAttrName()));
  if (op_role != static_cast<int>(framework::OpRole::kOptimize)) {
    return -1;
  }
  auto param_grad = boost::get<std::vector<std::string>>(
      node->Op()->GetAttr(OpProtoAndCheckerMaker::OpRoleVarAttrName()));

  PADDLE_ENFORCE_EQ(param_grad.size(), 2U);
  int dev_id = GetVarDeviceID(graph, param_grad[1], sharded_var_device);
  PADDLE_ENFORCE_NE(dev_id, -1, "dev_id should not be -1.[%s, %s, %s]",
                    node->Op()->Type(), param_grad[0], param_grad[1]);
  return dev_id;
}

int MultiDevSSAGraphBuilder::GetVarDeviceID(
    const ir::Graph &graph, const std::string &varname,
    const std::unordered_map<std::string, int> &sharded_var_device) const {
  auto got = sharded_var_device.find(varname);
  if (got == sharded_var_device.end()) {
    auto pos = varname.find(framework::kNewGradSuffix);
    if (pos != std::string::npos) {
      got = sharded_var_device.find(varname.substr(0, pos));
    }
  }
  return got == sharded_var_device.end() ? -1 : got->second;
}

void MultiDevSSAGraphBuilder::CreateScaleLossGradOp(
    ir::Graph *result, const std::string &loss_grad_name,
    ir::Node *out_var_node) const {
  for (size_t i = 0; i < places_.size(); ++i) {
    // Insert ScaleCost OpHandle
    auto *dev_ctx = platform::DeviceContextPool::Instance().Get(places_[i]);
    auto *op_handle = new ScaleLossGradOpHandle(
        result->CreateEmptyNode("scale_loss_grad", ir::Node::Type::kOperation),
        local_scopes_.size(), local_scopes_[i], places_[i], dev_ctx);
    result->Get<GraphOps>(kGraphOps).emplace_back(op_handle);

    // FIXME: Currently ScaleLossGradOp only uses device_count as the scale
    // factor, so it does not depend on any other operators.
    // VarHandle *loss = GetVarHandle(loss_var_name, place);
    // loss->pending_ops_.emplace_back(op_handle);
    // op_handle->inputs_.emplace_back(loss);

    CreateOpOutput(result, op_handle,
                   result->CreateVarNode(out_var_node->Var()), places_[i], i);
  }
}

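// Replicates node as one ComputationOpHandle per place, each bound to its own
// scope and device.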
void MultiDevSSAGraphBuilder::CreateComputationalOps(ir::Graph *result,
                                                     ir::Node *node,
                                                     size_t num_places) const {
  for (size_t scope_idx = 0; scope_idx < num_places; ++scope_idx) {
    auto p = places_[scope_idx];
    auto s = local_scopes_[scope_idx];
    result->Get<GraphOps>(kGraphOps).emplace_back(new ComputationOpHandle(
        result->CreateOpNode(node->Op()), s, p, scope_idx));
    CreateOpHandleIOs(result, node, scope_idx);
  }
}

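// Creates a ReduceOpHandle that gathers the latest version of gradient og
// from every place and writes the reduced result as a new version on
// dst_dev_id; returns the resulting VarHandle.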
VarHandle *MultiDevSSAGraphBuilder::CreateReduceOp(ir::Graph *result,
                                                   const std::string &og,
                                                   int dst_dev_id) const {
#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32)
  result->Get<GraphOps>(kGraphOps).emplace_back(new ReduceOpHandle(
      result->CreateEmptyNode("reduce", ir::Node::Type::kOperation),
      local_scopes_, places_, nccl_ctxs_));
#else
  result->Get<GraphOps>(kGraphOps).emplace_back(new ReduceOpHandle(
      result->CreateEmptyNode("reduce", ir::Node::Type::kOperation),
      local_scopes_, places_));
#endif
  auto *op_handle = result->Get<GraphOps>(kGraphOps).back();

  for (size_t i = 0; i < places_.size(); ++i) {
    auto &p = places_[i];
    SetCommunicationContext(op_handle, p);
    auto &vars = result->Get<GraphVars>(kGraphVars)[i][og];
    PADDLE_ENFORCE(!vars.empty());
    auto &prev_grad = vars.back();
    op_handle->AddInput(prev_grad);
  }
  auto &vars = result->Get<GraphVars>(kGraphVars)[dst_dev_id][og];
  auto var =
      new VarHandle(result->CreateEmptyNode(og, ir::Node::Type::kVariable),
                    vars.size(), dst_dev_id, og, places_[dst_dev_id]);
  vars.emplace_back(var);
  op_handle->AddOutput(var);
  return var;
}

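// Pins a distributed-training helper op (split_byref, split_selected_rows,
// split_ids, or concat) to a single device, records where its outputs live in
// sharded_var_device, and returns that device id.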
int MultiDevSSAGraphBuilder::CreateDistTrainOp(
    ir::Graph *result, ir::Node *node,
    std::unordered_map<std::string, int> *sharded_var_device) const {
  int op_dev_id = -1;
  std::vector<std::string> input_var_names;
  std::vector<std::string> output_var_names;
  for (ir::Node *input : node->inputs) {
    input_var_names.push_back(input->Name());
  }
  for (ir::Node *output : node->outputs) {
    output_var_names.push_back(output->Name());
  }

  if (node->Op()->Type() == "split_byref" ||
      node->Op()->Type() == "split_selected_rows" ||
      node->Op()->Type() == "split_ids") {
    // TODO(paddle-dev): getting the first var is not safe.
    op_dev_id =
        GetVarDeviceID(*result, input_var_names[0], *sharded_var_device);
    if (strategy_.reduce_ == BuildStrategy::ReduceStrategy::kAllReduce) {
      op_dev_id = GetAppropriateDeviceID(input_var_names);
      for (auto &varname : input_var_names) {
        sharded_var_device->emplace(varname, op_dev_id);
      }
    }
    for (auto &varname : output_var_names) {
      sharded_var_device->emplace(varname, op_dev_id);
    }
  } else if (node->Op()->Type() == "concat") {
    op_dev_id =
        GetVarDeviceID(*result, input_var_names[0], *sharded_var_device);
    for (auto &varname : output_var_names) {
      sharded_var_device->emplace(varname, op_dev_id);
    }
  } else {
    LOG(ERROR) << "got unexpected dist op: " << node->Op()->Type();
    PADDLE_THROW(
        "the distribute training related op should be in [split_byref, "
        "concat].");
  }

  PADDLE_ENFORCE(op_dev_id != -1,
                 "can not find right place for distributed op: %s",
                 node->Op()->Type());

  CreateComputationalOp(result, node, op_dev_id);
  return op_dev_id;
}

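// Feeds the latest version of each input variable from every place into the
// most recently created op handle (used by the barrier/recv RPC ops below).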
void SetOpInputsAllPlaces(ir::Graph *result, ir::Node *node, int num_places) {
  auto *op_handle = result->Get<GraphOps>(kGraphOps).back();
  for (ir::Node *input : node->inputs) {
    VarHandle *var = nullptr;
    for (int place_offset = 0; place_offset < num_places; ++place_offset) {
      auto &var_holders = result->Get<GraphVars>(kGraphVars)[place_offset];
      auto &var_holder = var_holders[input->Name()];
      if (!var_holder.empty()) {
        var = *var_holder.rbegin();
        op_handle->AddInput(var);
      }
    }
  }
}

// Create RPC-related op handles that connect their in ops and out ops.
int MultiDevSSAGraphBuilder::CreateRPCOp(
    ir::Graph *result, ir::Node *node,
    std::unordered_map<std::string, int> *sharded_var_device) const {
  int op_dev_id = -1;
  if (node->Op()->Type() == "send") {
    // TODO(paddle-dev): getting the first var is not safe.
    op_dev_id =
        GetVarDeviceID(*result, node->inputs[0]->Name(), *sharded_var_device);
    PADDLE_ENFORCE(!ir::IsControlDepVar(*node->inputs[0]),
                   "This hack no longer holds, please fix.");
    // A variable name that contains ".block" means it was split by the
    // split_byref op.
    if (strategy_.reduce_ == BuildStrategy::ReduceStrategy::kAllReduce &&
        node->inputs[0]->Name().find(".block") == std::string::npos) {
      std::vector<std::string> input_var_names;
      for (ir::Node *n : node->inputs) {
        input_var_names.push_back(n->Name());
      }
      auto send_param_grad = boost::get<std::vector<std::string>>(
          node->Op()->GetAttr(OpProtoAndCheckerMaker::OpRoleVarAttrName()));
      PADDLE_ENFORCE_EQ(send_param_grad.size(), 2U);
      op_dev_id = GetAppropriateDeviceID({send_param_grad[1]});
      VLOG(10) << "send grad " << input_var_names[0] << " origin "
               << send_param_grad[1] << " place: " << op_dev_id;
      for (auto &varname : input_var_names) {
        sharded_var_device->emplace(varname, op_dev_id);
      }
      sharded_var_device->emplace(send_param_grad[1], op_dev_id);
    }
  } else if (node->Op()->Type() == "recv") {
    std::vector<std::string> output_var_names;
    for (ir::Node *n : node->outputs) {
      output_var_names.push_back(n->Name());
    }
    auto recv_param_grad = boost::get<std::vector<std::string>>(
        node->Op()->GetAttr(OpProtoAndCheckerMaker::OpRoleVarAttrName()));
    if (recv_param_grad.size() == 2U) {
      op_dev_id =
          GetVarDeviceID(*result, recv_param_grad[1], *sharded_var_device);
      VLOG(10) << "recv param " << recv_param_grad[0]
               << " get grad place: " << recv_param_grad[1]
               << " place: " << op_dev_id;
    } else {
      op_dev_id = GetAppropriateDeviceID(output_var_names);
    }
    for (auto &varname : output_var_names) {
      sharded_var_device->emplace(varname, op_dev_id);
    }
  } else {
    // send_barrier and fetch_barrier will run on place 0.
    op_dev_id = 0;
  }

  PADDLE_ENFORCE(op_dev_id != -1, "can not find the right place for rpc op: %s",
                 node->Op()->Type());
  result->Get<GraphOps>(kGraphOps).emplace_back(new RPCOpHandle(
      result->CreateOpNode(node->Op()), *node->Op(), local_scopes_[op_dev_id],
      node->Op()->Type(), places_[op_dev_id]));

  if (node->Op()->Type() == "send") {
    CreateOpHandleIOs(result, node, op_dev_id);
  } else {
    // send_barrier, recv, and fetch_barrier's inputs are dep vars; get them
    // from all places.
    auto p = places_[op_dev_id];
    auto *op_handle = result->Get<GraphOps>(kGraphOps).back();
    op_handle->SetDeviceContext(p,
                                platform::DeviceContextPool::Instance().Get(p));

    SetOpInputsAllPlaces(result, node, places_.size());
    for (ir::Node *output : node->outputs) {
      int outvar_dev_id = op_dev_id;
      if (node->Op()->Type() == "fetch_barrier") {
        outvar_dev_id =
            GetVarDeviceID(*result, output->Name(), *sharded_var_device);
        PADDLE_ENFORCE_NE(outvar_dev_id, -1, "output name %s", output->Name());
      }
      p = places_[outvar_dev_id];
      ir::Node *new_node = nullptr;
      if (output->Var()) {
        new_node = result->CreateVarNode(output->Var());
      } else {
        new_node =
            result->CreateEmptyNode(output->Name(), ir::Node::Type::kVariable);
      }
      CreateOpOutput(result, op_handle, new_node, p, outvar_dev_id);
    }
  }
  return op_dev_id;
}

bool MultiDevSSAGraphBuilder::IsScaleLossOp(ir::Node *node) const {
  return boost::get<int>(
             node->Op()->GetAttr(OpProtoAndCheckerMaker::OpRoleAttrName())) ==
             (static_cast<int>(OpRole::kBackward) |
              static_cast<int>(OpRole::kLoss)) &&
         !loss_var_name_.empty();  // If loss_var_name_ is empty, this is test
                                   // mode.
}
}  // namespace details
}  // namespace framework
}  // namespace paddle

REGISTER_PASS(multi_devices_pass,
              paddle::framework::details::MultiDevSSAGraphBuilder)
    .RequirePassAttr(paddle::framework::details::kLossVarName)
    .RequirePassAttr(paddle::framework::details::kPlaces)
    .RequirePassAttr(paddle::framework::details::kParams)
    .RequirePassAttr(paddle::framework::details::kLocalScopes)
    .RequirePassAttr(paddle::framework::details::kStrategy)
    .RequirePassAttr(paddle::framework::details::kNumTrainers);