//   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <algorithm>
#include <fstream>
#include <string>
#include <utility>
#include <vector>

#include "paddle/fluid/framework/details/all_reduce_op_handle.h"
#include "paddle/fluid/framework/details/broadcast_op_handle.h"
#include "paddle/fluid/framework/details/computation_op_handle.h"
#include "paddle/fluid/framework/details/data_balance_op_handle.h"
#include "paddle/fluid/framework/details/multi_devices_graph_builder.h"
#include "paddle/fluid/framework/details/reduce_op_handle.h"
#include "paddle/fluid/framework/details/rpc_op_handle.h"
#include "paddle/fluid/framework/details/scale_loss_grad_op_handle.h"
#include "paddle/fluid/framework/ir/graph_helper.h"
#include "paddle/fluid/framework/ir/node.h"
#include "paddle/fluid/framework/op_info.h"
#include "paddle/fluid/framework/scope.h"

namespace paddle {
namespace framework {
namespace details {

static const char kLossVarName[] = "loss_var_name";
static const char kPlaces[] = "places";
static const char kParams[] = "params";
static const char kLocalScopes[] = "local_scopes";
static const char kStrategy[] = "strategy";

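// Reads the pass attributes registered below: the loss variable name, the
// places, the parameter set, the local scopes, and the build strategy. On
// CUDA builds it also grabs the NCCL context map, and it derives the
// gradient variable names from the parameter names.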
void MultiDevSSAGraphBuilder::Init() const {
  loss_var_name_ = Get<const std::string>(kLossVarName);
  places_ = Get<const std::vector<platform::Place>>(kPlaces);
  local_scopes_ = Get<const std::vector<Scope *>>(kLocalScopes);
  strategy_ = Get<const BuildStrategy>(kStrategy);
#ifdef PADDLE_WITH_CUDA
  nccl_ctxs_ = &Get<platform::NCCLContextMap>("nccl_ctxs");
#endif

  for (auto &p : Get<const std::unordered_set<std::string>>(kParams)) {
    grad_names_.insert(GradVarName(p));
  }
  balance_vars_.resize(places_.size(), 0);
  if (strategy_.enable_data_balance_ && places_.size() == 1) {
    LOG(WARNING) << "There is no need to enable data balance when there is "
                    "only one place. enable_data_balance is set to False.";
    strategy_.enable_data_balance_ = false;
  }
}

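// Wires up the op handle most recently appended to the graph on place
// `place_id`: existing var handles (or newly created ones) become its
// inputs, and a fresh var handle version is created for each output of the
// node.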
void MultiDevSSAGraphBuilder::CreateOpHandleIOs(ir::Graph *result,
                                                ir::Node *node,
                                                size_t place_id) const {
  auto p = places_[place_id];
  auto *op_handle = result->Get<GraphOps>(kGraphOps).back().get();
  op_handle->SetDeviceContext(p,
                              platform::DeviceContextPool::Instance().Get(p));

  for (ir::Node *input : node->inputs) {
    VarHandle *var = CreateOrGetLatestVarHandle(result, input, p, place_id);
    op_handle->AddInput(var);
  }

  for (ir::Node *output : node->outputs) {
    ir::Node *new_node = nullptr;
    if (output->Var()) {
      new_node = result->CreateVarNode(output->Var());
    } else {
      new_node =
          result->CreateEmptyNode(output->Name(), ir::Node::Type::kVariable);
    }
    CreateOpOutput(result, op_handle, new_node, p, place_id);
  }
}

std::vector<std::string> MultiDevSSAGraphBuilder::FindDistTrainSendVars(
    const std::vector<ir::Node *> &nodes) const {
  std::vector<std::string> send_vars;
  // Since parameters are all in block 0,
  // it's enough to scan only the send ops in block 0.
  for (auto &node : nodes) {
    OpDesc *op = node->Op();
    // TODO(Yancey1989): use a graceful method to find send op,
    // instead of the hard-coded string
    if (op->Type() == "send") {
      auto op_vars = op->InputArgumentNames();
      send_vars.reserve(send_vars.size() +
                        std::distance(op_vars.begin(), op_vars.end()));
      send_vars.insert(send_vars.end(), op_vars.begin(), op_vars.end());
    }
  }
  return send_vars;
}

std::vector<std::string> MultiDevSSAGraphBuilder::FindDistTrainRecvVars(
    const std::vector<ir::Node *> &nodes) const {
  std::vector<std::string> recv_vars;
  for (auto &node : nodes) {
    OpDesc *op = node->Op();
    // TODO(Yancey1989): use a graceful method to find recv op,
    // instead of the hard-coded string
    if (op->Type() == "recv") {
      auto op_vars = op->OutputArgumentNames();
      recv_vars.reserve(recv_vars.size() +
                        std::distance(op_vars.begin(), op_vars.end()));
      recv_vars.insert(recv_vars.end(), op_vars.begin(), op_vars.end());
    }
  }
  return recv_vars;
}

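// An op is part of distributed training if any of its outputs is a send var
// or any of its inputs is a recv var; membership is checked by the `.block`
// suffix test below.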
bool MultiDevSSAGraphBuilder::IsDistTrainOp(
    ir::Node *node, const std::vector<std::string> &send_vars,
    const std::vector<std::string> &recv_vars) const {
  if (send_vars.size() == 0 || recv_vars.size() == 0) {
    return false;
  }

  /**
   * Check whether any of opvars contains the suffix `.block` and appears in
   * rpc_vars.
   */
  auto checker = [](const std::vector<std::string> &opvars,
                    const std::vector<std::string> &rpc_vars) -> bool {
    for (auto &var : opvars) {
      // A variable name with the suffix `.block` means it is a variable split
      // by the DistributeTranspiler
      // [python/paddle/fluid/transpiler/distribute_transpiler.py].
      if (var.find(".block") != std::string::npos &&
          std::find(rpc_vars.begin(), rpc_vars.end(), var) != rpc_vars.end()) {
        return true;
      }
    }
    return false;
  };

  std::vector<std::string> input_var_names;
  std::vector<std::string> output_var_names;
  for (ir::Node *input : node->inputs) {
    input_var_names.push_back(input->Name());
  }
  for (ir::Node *output : node->outputs) {
    output_var_names.push_back(output->Name());
  }

  return checker(output_var_names, send_vars) ||
         checker(input_var_names, recv_vars);
}

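// A simple load-balancing heuristic: sum the element counts of the given
// variables and put them on the device with the smallest accumulated size so
// far.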
size_t MultiDevSSAGraphBuilder::GetAppropriateDeviceID(
    const std::vector<std::string> &var_names) const {
  int64_t numel_sum = 0;
  for (auto var_name : var_names) {
    if (all_vars_.find(var_name) == all_vars_.end()) continue;
    auto var_desc = all_vars_.at(var_name);
    PADDLE_ENFORCE_NOT_NULL(var_desc);
    auto dim = framework::make_ddim(var_desc->GetShape());
    int64_t numel = framework::product(dim);
    PADDLE_ENFORCE_GT(numel, 0);
    numel_sum += numel;
  }

  auto smallest =
      std::min_element(std::begin(balance_vars_), std::end(balance_vars_));
  size_t dev_id =
      static_cast<size_t>(std::distance(std::begin(balance_vars_), smallest));
  balance_vars_[dev_id] += numel_sum;
  return dev_id;
}

// Topologically sort the graph nodes from inputs to outputs.
// Since SSAGraphBuilder depends on forward/backward nodes to assign devices
// to parameters/gradients before optimizer ops, a plain topological sort is
// insufficient (some optimizer ops might not depend on any nodes), so we
// manually move all optimizer nodes after the last backward node.
// However, this assumption of SSAGraphBuilder should be relaxed in the future.
std::vector<ir::Node *> SortOpsAndDelayOptimizeOp(const ir::Graph &graph) {
  std::vector<ir::Node *> ret = ir::TopologySortOperations(graph);
  size_t last_backward = 0;
  for (size_t i = 0; i < ret.size(); ++i) {
    if (boost::get<int>(
            ret[i]->Op()->GetAttr(OpProtoAndCheckerMaker::OpRoleAttrName())) ==
        static_cast<int>(OpRole::kBackward)) {
      last_backward = i;
    }
  }

  std::vector<ir::Node *> optimize_ops;
  std::vector<ir::Node *> sorted_ret;
  for (size_t i = 0; i < ret.size(); ++i) {
    if (i < last_backward) {
      if (boost::get<int>(ret[i]->Op()->GetAttr(
              OpProtoAndCheckerMaker::OpRoleAttrName())) ==
          static_cast<int>(OpRole::kOptimize)) {
        optimize_ops.push_back(ret[i]);
      } else {
        sorted_ret.push_back(ret[i]);
      }
    } else if (i == last_backward) {
      sorted_ret.push_back(ret[i]);
      // Verify that no operation before the optimize ops depends on them.
      std::unordered_set<ir::Node *> optimize_set(optimize_ops.begin(),
                                                  optimize_ops.end());
      for (ir::Node *n : sorted_ret) {
        for (ir::Node *in : n->inputs) {
          for (ir::Node *pre_n : in->inputs) {
            PADDLE_ENFORCE(optimize_set.find(pre_n) == optimize_set.end(),
                           "optimize operations cannot be depended on by "
                           "forward or backward node %s -> %s",
                           pre_n->Name(), n->Name());
          }
        }
      }
      sorted_ret.insert(sorted_ret.end(), optimize_ops.begin(),
                        optimize_ops.end());
    } else {
      sorted_ret.push_back(ret[i]);
    }
  }
  return sorted_ret;
}

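// Entry point of the pass. It sorts the ops, rebuilds the graph as an SSA
// graph, dispatches each op to RPC / distributed-training / scale-loss /
// computational handling, inserts the reduce, all-reduce, and broadcast ops
// required by the build strategy, and finally resolves data hazards.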
std::unique_ptr<ir::Graph> MultiDevSSAGraphBuilder::ApplyImpl(
    std::unique_ptr<ir::Graph> graph) const {
  Init();
  // Compute the topological order and rebuild the graph structure.
  std::vector<ir::Node *> sorted_ops = SortOpsAndDelayOptimizeOp(*graph);
  auto nodes = graph->ReleaseNodes();
  ir::Graph &result = *graph;

  for (auto &node : nodes) {
    if (node->NodeType() == ir::Node::Type::kVariable && node->Var()) {
      all_vars_.emplace(node->Name(), node->Var());
    }
  }
  std::unordered_set<std::string> og_has_been_broadcast;

  // We cannot invoke resize; it is a bug in GCC 4.8.
  result.Set(kGraphVars, new GraphVars(places_.size()));
  result.Set(kGraphDepVars, new GraphDepVars);
  result.Set(kGraphOps, new GraphOps);
  result.Set(kShardedVarDevice, new ShardedVarDevice);

  // Find send/recv vars so that we can place the distributed-training-related
  // ops on place 0.
  auto send_vars = FindDistTrainSendVars(sorted_ops);
  auto recv_vars = FindDistTrainRecvVars(sorted_ops);

  std::vector<std::unordered_set<std::string>> bcast_var_name_set;
  bcast_var_name_set.resize(places_.size());

  size_t cur_device_id = 0;
  bool is_forwarding = true;

  for (ir::Node *node : sorted_ops) {
    if (boost::get<int>(
            node->Op()->GetAttr(OpProtoAndCheckerMaker::OpRoleAttrName())) ==
        static_cast<int>(OpRole::kRPC)) {
      CreateRPCOp(&result, node);
    } else if (IsDistTrainOp(node, send_vars, recv_vars)) {
      CreateDistTrainOp(&result, node);
    } else if (IsScaleLossOp(node)) {
      // The user can customize loss@grad if use_default_grad_scale_ is not
      // set.
      if (strategy_.gradient_scale_ !=
          BuildStrategy::GradientScaleStrategy::kCustomized) {
        // TODO(paddle-dev): Why is there no input for this op_handle?
        auto loss_grad_name = node->Op()->OutputArgumentNames()[0];
        CreateScaleLossGradOp(&result, loss_grad_name);
      }
      // This assumes the backward generating code will ensure IsScaleLossOp
      // is true only for the op that scales the final scalar loss.
      // It also assumes a backward op will always follow the forward op in
      // the block.
      is_forwarding = false;
    } else {
      int op_dev_id = GetOpDeviceID(result, node);
      if (op_dev_id != -1) {  // This op only runs on one specific device.
        CreateComputationalOp(&result, node, op_dev_id);
        for (ir::Node *n : node->outputs) {
          graph->Get<ShardedVarDevice>(kShardedVarDevice)
              .emplace(n->Name(), op_dev_id);
        }
      } else {
        // This op runs on all devices, and its outputs may include
        // parameters' gradients.
        // TODO(paddle-dev): What is so special about the "read" op?
        if (node->Op()->Type() == "read" && strategy_.enable_data_balance_) {
          node->Op()->SetAttr("throw_eof_exp", false);
          CreateComputationalOps(&result, node, places_.size());
          const auto &data_var_names = node->Op()->Output("Out");
          InsertDataBalanceOp(&result, data_var_names);
        } else {
          CreateComputationalOps(&result, node, places_.size());
        }

        if (!is_forwarding && places_.size() > 1) {
          // Currently, we assume that once a gradient is generated, it can be
          // broadcast, and each gradient is only broadcast once.
          if (static_cast<bool>(boost::get<int>(node->Op()->GetAttr(
                                    OpProtoAndCheckerMaker::OpRoleAttrName())) &
                                static_cast<int>(OpRole::kBackward))) {
            try {
              auto backward_vars = boost::get<std::vector<std::string>>(
                  node->Op()->GetNullableAttr(
                      OpProtoAndCheckerMaker::OpRoleVarAttrName()));

              PADDLE_ENFORCE_EQ(backward_vars.size() % 2, 0);

              for (size_t i = 0; i < backward_vars.size(); i += 2) {
                auto &p_name = backward_vars[i];
                auto &g_name = backward_vars[i + 1];
                VLOG(10) << "Bcast " << g_name << " for parameter " << p_name;

                switch (strategy_.reduce_) {
                  case BuildStrategy::ReduceStrategy::kReduce:
                    cur_device_id = GetAppropriateDeviceID({g_name});
                    CreateReduceOp(&result, g_name, cur_device_id);
                    graph->Get<ShardedVarDevice>(kShardedVarDevice)
                        .emplace(g_name, cur_device_id);
                    bcast_var_name_set[cur_device_id].emplace(p_name);
                    break;
                  case BuildStrategy::ReduceStrategy::kAllReduce:
                    if (IsSparseGradient(g_name)) {
                      CreateReduceOp(&result, g_name, 0);
                      CreateBroadcastOp(&result, g_name, 0);
                    } else {
                      InsertAllReduceOp(&result, g_name);
                    }
                    break;
                  default:
                    LOG(FATAL) << "Unknown reduce strategy.";
                    break;
                }
              }
            } catch (boost::bad_get e) {
            }
          }
        }
      }
    }
  }

  bool use_gpu = false;
#ifdef PADDLE_WITH_CUDA
  use_gpu = nccl_ctxs_ != nullptr;
#endif

  if (use_gpu ||
      strategy_.reduce_ == BuildStrategy::ReduceStrategy::kAllReduce) {
    // Insert BCast Ops
    for (size_t dev_id = 0; dev_id < bcast_var_name_set.size(); ++dev_id) {
      auto &to_bcast_set = bcast_var_name_set[dev_id];
      for (auto &bcast_name : to_bcast_set) {
        CreateBroadcastOp(&result, bcast_name, dev_id);
      }
    }
  }
  /*
  Dependency graph has been constructed. However, there are still data
  hazards that need to be handled.
  */
  PolishGraphToSupportDataHazards(&result);

  /*
   * Only variables should be the leaves of graph.
   */
  AddOutputToLeafOps(&result);
  PADDLE_ENFORCE(!ir::HasCircle(result));
  return graph;
}

bool MultiDevSSAGraphBuilder::IsSparseGradient(const std::string &og) const {
  PADDLE_ENFORCE(all_vars_.count(og) != 0);
  if (all_vars_.at(og)->GetType() == proto::VarType::SELECTED_ROWS) {
    return true;
  }
  return false;
}

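// On CUDA builds the communication context is left to the NCCL context map
// when it is available; otherwise (and on CPU-only builds) the default
// context from the DeviceContextPool is used.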
void MultiDevSSAGraphBuilder::SetCommunicationContext(
    OpHandleBase *op_handle, const platform::Place &p) const {
#ifdef PADDLE_WITH_CUDA
  if (nccl_ctxs_ == nullptr) {
    op_handle->SetDeviceContext(p,
                                platform::DeviceContextPool::Instance().Get(p));
  }
#else
  op_handle->SetDeviceContext(p,
                              platform::DeviceContextPool::Instance().Get(p));
#endif
}

void MultiDevSSAGraphBuilder::CreateBroadcastOp(ir::Graph *result,
                                                const std::string &p_name,
                                                size_t src_dev_id) const {
#ifdef PADDLE_WITH_CUDA
  auto *op_handle = new BroadcastOpHandle(
      result->CreateEmptyNode("broadcast", ir::Node::Type::kOperation),
      local_scopes_, places_, nccl_ctxs_);
#else
  auto *op_handle = new BroadcastOpHandle(
      result->CreateEmptyNode("broadcast", ir::Node::Type::kOperation),
      local_scopes_, places_);
#endif
  result->Get<GraphOps>(kGraphOps).emplace_back(op_handle);

  auto *in =
      result->Get<GraphVars>(kGraphVars).at(src_dev_id).at(p_name).back().get();
  op_handle->AddInput(in);

  for (size_t i = 0; i < places_.size(); ++i) {
    auto &p = places_[i];
    SetCommunicationContext(op_handle, p);
    auto &vars = result->Get<GraphVars>(kGraphVars).at(i).at(p_name);
    auto *out_var = new VarHandle(
        result->CreateEmptyNode(p_name, ir::Node::Type::kVariable), vars.size(),
        i, p_name, p);
    vars.emplace_back(out_var);
    op_handle->AddOutput(out_var);
  }
}

void MultiDevSSAGraphBuilder::CreateComputationalOp(ir::Graph *result,
                                                    ir::Node *node,
                                                    int dev_id) const {
  result->Get<GraphOps>(kGraphOps).emplace_back(
      new ComputationOpHandle(result->CreateOpNode(node->Op()),
                              local_scopes_[dev_id], places_[dev_id]));
  CreateOpHandleIOs(result, node, dev_id);
}

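// Inserts an all-reduce over gradient `og`: the latest version of `og` on
// every place becomes an input, and a new version is appended on every place
// as the output.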
void MultiDevSSAGraphBuilder::InsertAllReduceOp(ir::Graph *result,
                                                const std::string &og) const {
#ifdef PADDLE_WITH_CUDA
  result->Get<GraphOps>(kGraphOps).emplace_back(new AllReduceOpHandle(
      result->CreateEmptyNode("allreduce", ir::Node::Type::kOperation),
      local_scopes_, places_, nccl_ctxs_));
#else
  result->Get<GraphOps>(kGraphOps).emplace_back(new AllReduceOpHandle(
      result->CreateEmptyNode("allreduce", ir::Node::Type::kOperation),
      local_scopes_, places_));
#endif
  auto *op_handle = result->Get<GraphOps>(kGraphOps).back().get();

  for (size_t i = 0; i < places_.size(); ++i) {
    auto &p = places_[i];
    SetCommunicationContext(op_handle, p);
    auto &vars = result->Get<GraphVars>(kGraphVars)[i][og];
    PADDLE_ENFORCE(!vars.empty());
    auto &prev_grad = vars.back();
    op_handle->AddInput(prev_grad.get());

    auto var =
        new VarHandle(result->CreateEmptyNode(og, ir::Node::Type::kVariable),
                      vars.size(), i, og, p);
    vars.emplace_back(var);
    op_handle->AddOutput(var);
  }
}

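// Inserts a DataBalanceOpHandle that spans all places: for every data var it
// consumes the current version on each place and produces a new, balanced
// version, so uneven batches produced by readers are evened out across
// devices.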
void MultiDevSSAGraphBuilder::InsertDataBalanceOp(
    ir::Graph *result, const std::vector<std::string> &datas) const {
#ifdef PADDLE_WITH_CUDA
  result->Get<GraphOps>(kGraphOps).emplace_back(new DataBalanceOpHandle(
      result->CreateEmptyNode("data_balance", ir::Node::Type::kOperation),
      local_scopes_, places_, nccl_ctxs_));
#else
  result->Get<GraphOps>(kGraphOps).emplace_back(new DataBalanceOpHandle(
      result->CreateEmptyNode("data_balance", ir::Node::Type::kOperation),
      local_scopes_, places_));
#endif
  auto *op_handle = result->Get<GraphOps>(kGraphOps).back().get();
  for (size_t i = 0; i < places_.size(); ++i) {
    auto &p = places_[i];
    SetCommunicationContext(op_handle, p);
    for (const std::string &d_name : datas) {
      auto &vars = result->Get<GraphVars>(kGraphVars)[i][d_name];
      PADDLE_ENFORCE(!vars.empty());
      op_handle->AddInput(vars.back().get());
      auto var = new VarHandle(
          result->CreateEmptyNode(d_name, ir::Node::Type::kVariable),
          vars.size(), i, d_name, p);
      vars.emplace_back(var);
      op_handle->AddOutput(var);
    }
  }
}

bool MultiDevSSAGraphBuilder::IsParameterGradientOnce(
    const std::string &og,
    std::unordered_set<std::string> *og_has_been_broadcast) const {
  bool is_pg_once =
      grad_names_.count(og) != 0 && og_has_been_broadcast->count(og) == 0;
  if (is_pg_once) {
    // Insert NCCL AllReduce Op
    og_has_been_broadcast->insert(og);
  }
  return is_pg_once;
}

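// Under the kReduce strategy an optimize op is pinned to the device that
// owns its parameter's gradient; a return value of -1 means the op may run
// on all devices.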
int MultiDevSSAGraphBuilder::GetOpDeviceID(const ir::Graph &graph,
                                           ir::Node *node) const {
  if (strategy_.reduce_ != BuildStrategy::ReduceStrategy::kReduce) {
    return -1;
  }
  int op_role = boost::get<int>(
      node->Op()->GetAttr(framework::OpProtoAndCheckerMaker::OpRoleAttrName()));
  if (op_role != static_cast<int>(framework::OpRole::kOptimize)) {
    return -1;
  }
  auto param_grad = boost::get<std::vector<std::string>>(
      node->Op()->GetAttr(OpProtoAndCheckerMaker::OpRoleVarAttrName()));

  PADDLE_ENFORCE_EQ(param_grad.size(), 2U);
  int dev_id = GetVarDeviceID(graph, param_grad[1]);
  PADDLE_ENFORCE_NE(dev_id, -1, "dev_id should not be -1.[%s, %s, %s]",
                    node->Op()->Type(), param_grad[0], param_grad[1]);
  return dev_id;
}

int MultiDevSSAGraphBuilder::GetVarDeviceID(const ir::Graph &graph,
                                            const std::string &varname) const {
  auto &sharded_var_device = graph.Get<ShardedVarDevice>(kShardedVarDevice);
  auto got = sharded_var_device.find(varname);
  return got == sharded_var_device.end() ? -1 : got->second;
}

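// Creates one ScaleLossGradOpHandle per place, each producing a new version
// of `loss_grad_name` on that place.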
void MultiDevSSAGraphBuilder::CreateScaleLossGradOp(
    ir::Graph *result, const std::string &loss_grad_name) const {
  for (size_t i = 0; i < places_.size(); ++i) {
// Insert ScaleCost OpHandle
#ifdef PADDLE_WITH_CUDA
    auto *communication_dev_ctx =
        nccl_ctxs_ ? nccl_ctxs_->DevCtx(places_[i])
                   : platform::DeviceContextPool::Instance().Get(places_[i]);
#else
    auto *communication_dev_ctx =
        platform::DeviceContextPool::Instance().Get(platform::CPUPlace());
#endif
    auto *op_handle = new ScaleLossGradOpHandle(
        result->CreateEmptyNode("scale_loss_grad", ir::Node::Type::kOperation),
        local_scopes_.size(), local_scopes_[i], places_[i],
        communication_dev_ctx);
    result->Get<GraphOps>(kGraphOps).emplace_back(op_handle);

    // FIXME: Currently ScaleLossGradOp only uses device_count as the scale
    // factor, so it does not depend on any other operators.
    // VarHandle *loss = GetVarHandle(loss_var_name, place);
    // loss->pending_ops_.emplace_back(op_handle);
    // op_handle->inputs_.emplace_back(loss);

    CreateOpOutput(
        result, op_handle,
        result->CreateEmptyNode(loss_grad_name, ir::Node::Type::kVariable),
        places_[i], i);
  }
}

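// Replicates `node` on every place: one ComputationOpHandle per local
// scope/place pair.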
void MultiDevSSAGraphBuilder::CreateComputationalOps(ir::Graph *result,
                                                     ir::Node *node,
                                                     size_t num_places) const {
  for (size_t scope_idx = 0; scope_idx < num_places; ++scope_idx) {
    auto p = places_[scope_idx];
    auto s = local_scopes_[scope_idx];
    result->Get<GraphOps>(kGraphOps).emplace_back(
        new ComputationOpHandle(result->CreateOpNode(node->Op()), s, p));
    CreateOpHandleIOs(result, node, scope_idx);
  }
}

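// Gathers the latest version of `og` on every place as inputs and emits the
// reduced result as a new var handle on `dst_dev_id`.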
VarHandle *MultiDevSSAGraphBuilder::CreateReduceOp(ir::Graph *result,
                                                   const std::string &og,
                                                   int dst_dev_id) const {
#ifdef PADDLE_WITH_CUDA
  result->Get<GraphOps>(kGraphOps).emplace_back(new ReduceOpHandle(
      result->CreateEmptyNode("reduce", ir::Node::Type::kOperation),
      local_scopes_, places_, nccl_ctxs_));
#else
  result->Get<GraphOps>(kGraphOps).emplace_back(new ReduceOpHandle(
      result->CreateEmptyNode("reduce", ir::Node::Type::kOperation),
      local_scopes_, places_));
#endif
  auto *op_handle = result->Get<GraphOps>(kGraphOps).back().get();

  for (size_t i = 0; i < places_.size(); ++i) {
    auto &p = places_[i];
    SetCommunicationContext(op_handle, p);
    auto &vars = result->Get<GraphVars>(kGraphVars)[i][og];
    PADDLE_ENFORCE(!vars.empty());
    auto &prev_grad = vars.back();
    op_handle->AddInput(prev_grad.get());
  }
  auto &vars = result->Get<GraphVars>(kGraphVars)[dst_dev_id][og];
  auto var =
      new VarHandle(result->CreateEmptyNode(og, ir::Node::Type::kVariable),
                    vars.size(), dst_dev_id, og, places_[dst_dev_id]);
  vars.emplace_back(var);
  op_handle->AddOutput(var);
  return var;
}

// Find each occurrence of `prev_op_name` and make the current `op` depend
// on it.
void MultiDevSSAGraphBuilder::ConnectOp(ir::Graph *result, OpHandleBase *op,
                                        const std::string &prev_op_name) const {
  for (auto &prev_op : result->Get<GraphOps>(kGraphOps)) {
    if (prev_op->Name() == prev_op_name) {
      auto *dep_var = new DummyVarHandle(result->CreateControlDepVar());
      prev_op->AddOutput(dep_var);
      result->Get<GraphDepVars>(kGraphDepVars).emplace(dep_var);
      op->AddInput(dep_var);
    }
  }
}

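// Places the split/concat ops produced by the DistributeTranspiler: they are
// pinned to the device of their input var (or load-balanced under
// kAllReduce), and every var they touch is recorded in the sharded-var-device
// map.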
void MultiDevSSAGraphBuilder::CreateDistTrainOp(ir::Graph *result,
                                                ir::Node *node) const {
  int op_dev_id = -1;
  std::vector<std::string> input_var_names;
  std::vector<std::string> output_var_names;
  for (ir::Node *input : node->inputs) {
    input_var_names.push_back(input->Name());
  }
  for (ir::Node *output : node->outputs) {
    output_var_names.push_back(output->Name());
  }

  if (node->Op()->Type() == "split_byref" ||
      node->Op()->Type() == "split_selected_rows") {
    // TODO(paddle-dev): getting the first var is not safe.
    op_dev_id = GetVarDeviceID(*result, input_var_names[0]);
    if (strategy_.reduce_ == BuildStrategy::ReduceStrategy::kAllReduce) {
      op_dev_id = GetAppropriateDeviceID(input_var_names);
      for (auto &varname : input_var_names) {
        result->Get<ShardedVarDevice>(kShardedVarDevice)
            .emplace(varname, op_dev_id);
      }
    }
    for (auto &varname : output_var_names) {
      result->Get<ShardedVarDevice>(kShardedVarDevice)
          .emplace(varname, op_dev_id);
    }
  } else if (node->Op()->Type() == "concat") {
    op_dev_id = GetVarDeviceID(*result, input_var_names[0]);
    for (auto &varname : output_var_names) {
      result->Get<ShardedVarDevice>(kShardedVarDevice)
          .emplace(varname, op_dev_id);
    }
  } else {
    PADDLE_THROW(
        "the distributed training related op should be in [split_byref, "
        "concat].");
  }

  PADDLE_ENFORCE(op_dev_id != -1,
                 "cannot find the right place for distributed op: %s",
                 node->Op()->Type());

  CreateComputationalOp(result, node, op_dev_id);
  if (node->Op()->Type() == "concat") {
    ConnectOp(result, result->Get<GraphOps>(kGraphOps).back().get(),
              "fetch_barrier");
  }
}

// Create RPC-related op handles that connect their in-ops and out-ops.
void MultiDevSSAGraphBuilder::CreateRPCOp(ir::Graph *result,
                                          ir::Node *node) const {
  int op_dev_id = -1;
  if (node->Op()->Type() == "send") {
    // TODO(paddle-dev): getting the first var is not safe.
    op_dev_id = GetVarDeviceID(*result, node->inputs[0]->Name());
    PADDLE_ENFORCE(!ir::IsControlDepVar(*node->inputs[0]),
                   "This hack no longer holds, please fix.");
    // A variable name which contains ".block" means it was split by the
    // split_byref op, so we can balance the variable blocks across all the
    // pserver instances.
    if (strategy_.reduce_ == BuildStrategy::ReduceStrategy::kAllReduce &&
        node->inputs[0]->Name().find(".block") == std::string::npos) {
      std::vector<std::string> input_var_names;
      for (ir::Node *n : node->inputs) {
        input_var_names.push_back(n->Name());
      }
      op_dev_id = GetAppropriateDeviceID(input_var_names);
      for (auto &varname : input_var_names) {
        result->Get<ShardedVarDevice>(kShardedVarDevice)
            .emplace(varname, op_dev_id);
      }
    }
  } else if (node->Op()->Type() == "recv") {
    std::vector<std::string> output_var_names;
    for (ir::Node *n : node->outputs) {
      output_var_names.push_back(n->Name());
    }
    op_dev_id = GetAppropriateDeviceID(output_var_names);
    for (auto &varname : output_var_names) {
      result->Get<ShardedVarDevice>(kShardedVarDevice)
          .emplace(varname, op_dev_id);
    }
  } else {
    // send_barrier and fetch_barrier op can be scheduled on device 0
    op_dev_id = 0;
  }

  PADDLE_ENFORCE(op_dev_id != -1, "cannot find the right place for rpc op: %s",
                 node->Op()->Type());

  result->Get<GraphOps>(kGraphOps).emplace_back(new RPCOpHandle(
      result->CreateOpNode(node->Op()), *node->Op(), local_scopes_[op_dev_id],
      node->Op()->Type(), places_[op_dev_id]));

  // TODO(panyx0718): This might not be needed anymore.
  if (node->Op()->Type() == "send_barrier") {
    ConnectOp(result, result->Get<GraphOps>(kGraphOps).back().get(), "send");
  } else if (node->Op()->Type() == "recv") {
    ConnectOp(result, result->Get<GraphOps>(kGraphOps).back().get(),
              "send_barrier");
  } else if (node->Op()->Type() == "fetch_barrier") {
    ConnectOp(result, result->Get<GraphOps>(kGraphOps).back().get(), "recv");
  } else if (node->Op()->Type() == "send") {
    // do nothing
  } else {
    PADDLE_THROW(
        "rpc op should be in ["
        "send, send_barrier, recv, fetch_barrier]");
  }

  CreateOpHandleIOs(result, node, op_dev_id);
}

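// An op scales the loss iff its role is exactly Backward | Loss; an empty
// loss var name indicates test mode, where no scale op exists.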
bool MultiDevSSAGraphBuilder::IsScaleLossOp(ir::Node *node) const {
  return boost::get<int>(
             node->Op()->GetAttr(OpProtoAndCheckerMaker::OpRoleAttrName())) ==
             (static_cast<int>(OpRole::kBackward) |
              static_cast<int>(OpRole::kLoss)) &&
         !loss_var_name_.empty();  // If loss_var is empty, this is test mode.
}
}  // namespace details
}  // namespace framework
}  // namespace paddle

REGISTER_PASS(multi_device_pass,
              paddle::framework::details::MultiDevSSAGraphBuilder)
    .RequirePassAttr(paddle::framework::details::kLossVarName)
    .RequirePassAttr(paddle::framework::details::kPlaces)
    .RequirePassAttr(paddle::framework::details::kParams)
    .RequirePassAttr(paddle::framework::details::kLocalScopes)
    .RequirePassAttr(paddle::framework::details::kStrategy);