// multi_devices_graph_pass.cc
//   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <algorithm>
#include <fstream>
#include <string>
#include <utility>
#include <vector>

#include "paddle/fluid/framework/details/all_reduce_op_handle.h"
#include "paddle/fluid/framework/details/broadcast_op_handle.h"
#include "paddle/fluid/framework/details/computation_op_handle.h"
#include "paddle/fluid/framework/details/data_balance_op_handle.h"
#include "paddle/fluid/framework/details/fused_broadcast_op_handle.h"
#include "paddle/fluid/framework/details/multi_devices_graph_pass.h"
#include "paddle/fluid/framework/details/reduce_op_handle.h"
#include "paddle/fluid/framework/details/rpc_op_handle.h"
#include "paddle/fluid/framework/details/scale_loss_grad_op_handle.h"
#include "paddle/fluid/framework/ir/graph_helper.h"
#include "paddle/fluid/framework/ir/node.h"
#include "paddle/fluid/framework/op_info.h"
#include "paddle/fluid/framework/scope.h"

namespace paddle {
namespace framework {
namespace details {

namespace {
// TODO(panyx0718): Clean this up as well.
// all operators. NOTE that even though we use a vector here, the operators
// are unordered.
typedef std::vector<OpHandleBase *> GraphOps;
const char kGraphOps[] = "ops";

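// Returns true iff `node`'s op has `role` set in its op_role attribute.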
bool OpHaveRole(const ir::Node &node, const framework::OpRole &role) {
  return boost::get<int>(
             node.Op()->GetAttr(OpProtoAndCheckerMaker::OpRoleAttrName())) ==
         static_cast<int>(role);
}

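// Adds a control-dependency (dummy) variable between every op that reads one
// version of a variable and the op that writes the next version, so that
// write-after-read hazards are serialized.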
void PolishGraphToSupportDataHazards(ir::Graph *graph) {
  for (auto &var_map : graph->Get<GraphVars>(kGraphVars)) {
    for (auto &name_pair : var_map) {
      if (name_pair.second.size() <= 1) {
        continue;
      }
      auto it_new = name_pair.second.rbegin();
      auto it_old = name_pair.second.rbegin();
      ++it_old;
      for (; it_old != name_pair.second.rend(); it_new = it_old, ++it_old) {
        OpHandleBase *write_op = (*it_new)->GeneratedOp();
        const auto &read_ops = (*it_old)->PendingOps();

        for (auto *read_op : read_ops) {
          // Manually add a dependency var from read_op to write_op;
          if (read_op == write_op) {
            // Read Write is the same op.
            continue;
          }
          bool has_dep = false;
          for (auto *r_out : read_op->Outputs()) {
            for (auto *w_in : write_op->Inputs()) {
              if (r_out->Node() == w_in->Node()) {
                has_dep = true;
                break;
              }
            }
          }
          if (has_dep) continue;

          auto *dep_var = new DummyVarHandle(graph->CreateControlDepVar());
          read_op->AddOutput(dep_var);
          write_op->AddInput(dep_var);
          graph->Get<GraphDepVars>(kGraphDepVars).emplace(dep_var);
        }
      }
    }
  }
}

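// Returns the latest VarHandle of `node` on `place`, creating version 0 first
// if the variable has no handle there yet.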
VarHandle *CreateOrGetLatestVarHandle(ir::Graph *graph, ir::Node *node,
                                      const platform::Place &place,
                                      size_t place_offset) {
  auto &var_holders = graph->Get<GraphVars>(kGraphVars)[place_offset];
  auto &var_holder = var_holders[node->Name()];
  VarHandle *var = nullptr;
  if (var_holder.empty()) {
    if (node->Var()) {
      var = new VarHandle(graph->CreateVarNode(node->Var()), 0, place_offset,
                          node->Name(), place);
    } else {
      var = new VarHandle(
          graph->CreateEmptyNode(node->Name(), ir::Node::Type::kVariable), 0,
          place_offset, node->Name(), place);
    }
    var_holder.emplace_back(var);
  } else {
    var = *var_holder.rbegin();
  }
  return var;
}

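// Appends a new version of the output variable and registers it as an output
// of `op_handle`.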
void CreateOpOutput(ir::Graph *graph, OpHandleBase *op_handle,
                    ir::Node *new_node, const platform::Place &place,
                    size_t place_offset) {
  auto &vars =
      graph->Get<GraphVars>(kGraphVars)[place_offset][new_node->Name()];
  size_t version = vars.size();
  auto var =
      new VarHandle(new_node, version, place_offset, new_node->Name(), place);
  vars.emplace_back(var);
  op_handle->AddOutput(var);
}

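// Gives every op without outputs a dummy output, so that only variables are
// the leaves of the graph.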
void AddOutputToLeafOps(ir::Graph *graph) {
  for (auto &op : graph->Get<GraphOps>(kGraphOps)) {
    if (!op->Outputs().empty()) {
      continue;
    }
    auto *dummy_leaf = new DummyVarHandle(graph->CreateControlDepVar());
    graph->Get<GraphDepVars>(kGraphDepVars).emplace(dummy_leaf);
    op->AddOutput(dummy_leaf);
  }
}
}  // namespace

void MultiDevSSAGraphBuilderBase::Init() const {
  all_vars_.clear();

  loss_var_name_ = Get<const std::string>(kLossVarName);
  places_ = Get<const std::vector<platform::Place>>(kPlaces);
  local_scopes_ = Get<const std::vector<Scope *>>(kLocalScopes);
  strategy_ = Get<const BuildStrategy>(kStrategy);
#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32)
  nccl_ctxs_ = &Get<platform::NCCLContextMap>("nccl_ctxs");
#endif
}

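// Entry point of the pass: rebuilds the program graph as a multi-device SSA
// graph, inserting per-place computation ops, scale-loss-grad ops and the
// collective (gradient aggregation) ops chosen by the derived builder.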
std::unique_ptr<ir::Graph> MultiDevSSAGraphBuilderBase::ApplyImpl(
    std::unique_ptr<ir::Graph> graph) const {
  Init();
  std::vector<ir::Node *> sorted_ops = SortOperations(*graph);

  auto nodes = graph->ReleaseNodes();
  ir::Graph &result = *graph;

  for (auto &node : nodes) {
    if (node->IsVar() && node->Var()) {
      all_vars_.emplace(node->Name(), node->Var());
    }
  }

  // We cannot invoke resize; it is a bug in GCC 4.8.
  result.Set(kGraphVars, new GraphVars(places_.size()));
  result.Set(kGraphDepVars, new GraphDepVars);
  result.Set(kGraphOps, new GraphOps);

  bool is_forwarding = true;
  bool insert_collection_ops = NeedCollectiveOps();
  if (strategy_.async_mode_) {
    // async mode does not need to merge gradients
    insert_collection_ops = false;
  }

  for (ir::Node *node : sorted_ops) {
    if (DealWithSpecialOp(&result, node)) {
      continue;
    } else {
      // This op runs on all devices
      if (IsScaleLossOp(node)) {
        // user can customize loss@grad if not use_default_grad_scale_
        InsertScaleLossGradOp(&result, node);
        // This assumes the backward generating code will ensure IsScaleLossOp
        // is true only for the op that scale the final scalar loss.
        // It also assumes backward op will always follow the forward op in
        // the block.
        is_forwarding = false;
      } else {
        CreateComputationalOps(&result, node, places_.size());
      }

      // Insert collection ops
      if (!is_forwarding && insert_collection_ops) {
        try {
          bool is_bk_op =
              static_cast<bool>(boost::get<int>(node->Op()->GetAttr(
                                    OpProtoAndCheckerMaker::OpRoleAttrName())) &
                                static_cast<int>(OpRole::kBackward));
          // optimize op is already processed in DealWithSpecialOp,
          // here we only consider backward op
          if (!is_bk_op) continue;

          /*
           * The op that generates the gradient of one parameter has the attr
           * op_role_var to record the parameter and its gradient, like:
           *   attrs {
           *     name: "op_role_var"
           *     type: STRINGS
           *     strings: "fc_1.b_0"
           *     strings: "fc_1.b_0@GRAD"
           *   }
           */

          // Currently, we assume that once gradient is generated, it can be
          // broadcast, and each gradient is only broadcast once.
          auto backward_vars =
              boost::get<std::vector<std::string>>(node->Op()->GetNullableAttr(
                  OpProtoAndCheckerMaker::OpRoleVarAttrName()));
          PADDLE_ENFORCE_EQ(backward_vars.size() % 2, 0);

          for (size_t i = 0; i < backward_vars.size(); i += 2) {
            auto &p_name = backward_vars[i];
            auto &g_name = backward_vars[i + 1];
            VLOG(3) << "Bcast " << g_name << " for parameter " << p_name;

            InsertCollectiveOp(&result, p_name, g_name);
          }
        } catch (boost::bad_get e) {
        }
      }
    }
  }

  InsertPostprocessOps(&result);

  /*
  Dependency graph has been constructed. However, there are still data
  hazards that need to be handled.
  */
  PolishGraphToSupportDataHazards(&result);

  /*
   * Only variables should be the leaves of graph.
   */
  AddOutputToLeafOps(&result);
  result.Erase(kGraphOps);
  return graph;
}

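// Picks the loss scale from the gradient-scale strategy; a scale of 0
// (kCustomized) means the user supplies loss@grad, so no op is inserted.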
void MultiDevSSAGraphBuilderBase::InsertScaleLossGradOp(
    ir::Graph *result, const ir::Node *node) const {
  // user can customize loss@grad if not use_default_grad_scale_
  size_t loss_scale = 0;
  switch (this->strategy_.gradient_scale_) {
    case BuildStrategy::GradientScaleStrategy::kOne:
      loss_scale = 1;
      break;
    case BuildStrategy::GradientScaleStrategy::kCoeffNumDevice:
      loss_scale = Get<size_t>(kNRanks);
      break;
    case BuildStrategy::GradientScaleStrategy::kCustomized:
      loss_scale = 0;
      break;
    default:
      LOG(FATAL) << "Unknown gradient scale strategy.";
      break;
  }

  VLOG(3) << "loss_scale: " << loss_scale;

  if (loss_scale) {
    // TODO(paddle-dev): Why is there no input for this op_handle?
    auto loss_grad_name = node->Op()->OutputArgumentNames()[0];
    auto out_dtype = this->all_vars_.at(loss_grad_name)->GetDataType();
    this->CreateScaleLossGradOp(result, loss_grad_name, node->outputs[0],
                                loss_scale, out_dtype);
  }
}

std::vector<ir::Node *> MultiDevSSAGraphBuilderBase::SortOperations(
    const ir::Graph &graph) const {
  return ir::TopologySortOperations(graph);
}

bool MultiDevSSAGraphBuilderBase::UseGPU() const {
  bool use_gpu = false;
#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32)
  use_gpu = nccl_ctxs_ != nullptr;
#endif
  return use_gpu;
}

bool MultiDevSSAGraphBuilderBase::NeedCollectiveOps() const {
  return Get<size_t>(kNRanks) > 1;
}

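// Wires the most recently created op handle to its input/output var handles
// on place `place_id`.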
void MultiDevSSAGraphBuilderBase::CreateOpHandleIOs(ir::Graph *result,
                                                    ir::Node *node,
                                                    size_t place_id) const {
  auto p = places_[place_id];
  auto *op_handle = result->Get<GraphOps>(kGraphOps).back();
  op_handle->SetDeviceContext(p,
                              platform::DeviceContextPool::Instance().Get(p));

  for (ir::Node *input : node->inputs) {
    VarHandle *var = CreateOrGetLatestVarHandle(result, input, p, place_id);
    op_handle->AddInput(var);
  }

  for (ir::Node *output : node->outputs) {
    ir::Node *new_node = nullptr;
    if (output->Var()) {
      new_node = result->CreateVarNode(output->Var());
    } else {
      new_node =
          result->CreateEmptyNode(output->Name(), ir::Node::Type::kVariable);
    }
    CreateOpOutput(result, op_handle, new_node, p, place_id);
  }
}

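// Without NCCL contexts, collective op handles fall back to the per-place
// default device context.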
void MultiDevSSAGraphBuilderBase::SetCommunicationContext(
    OpHandleBase *op_handle, const platform::Place &p) const {
#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32)
  if (nccl_ctxs_ == nullptr) {
    op_handle->SetDeviceContext(p,
                                platform::DeviceContextPool::Instance().Get(p));
  }
#else
  op_handle->SetDeviceContext(p,
                              platform::DeviceContextPool::Instance().Get(p));
#endif
}

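// Broadcasts the latest version of `p_name` from `src_dev_id` to all places,
// producing a new version of the variable on every device.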
void MultiDevSSAGraphBuilderBase::CreateBroadcastOp(ir::Graph *result,
                                                    const std::string &p_name,
                                                    size_t src_dev_id) const {
#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32)
  auto *op_handle = new BroadcastOpHandle(
      result->CreateEmptyNode("broadcast", ir::Node::Type::kOperation),
      local_scopes_, places_, nccl_ctxs_);
#else
  auto *op_handle = new BroadcastOpHandle(
      result->CreateEmptyNode("broadcast", ir::Node::Type::kOperation),
      local_scopes_, places_);
#endif
  result->Get<GraphOps>(kGraphOps).emplace_back(op_handle);

  auto *in =
      result->Get<GraphVars>(kGraphVars).at(src_dev_id).at(p_name).back();
  op_handle->AddInput(in);

  for (size_t i = 0; i < places_.size(); ++i) {
    auto &p = places_[i];
    SetCommunicationContext(op_handle, p);
    auto &vars = result->Get<GraphVars>(kGraphVars).at(i).at(p_name);
    auto *out_var = new VarHandle(
        result->CreateEmptyNode(p_name, ir::Node::Type::kVariable), vars.size(),
        i, p_name, p);
    vars.emplace_back(out_var);
    op_handle->AddOutput(out_var);
  }
}

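// Same as CreateBroadcastOp, but fuses the broadcasts of all variables in
// `bcast_varnames` into a single op handle.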
void MultiDevSSAGraphBuilderBase::CreateFusedBroadcastOp(
    ir::Graph *result,
    const std::vector<std::unordered_set<std::string>> &bcast_varnames) const {
#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32)
  auto *op_handle = new FusedBroadcastOpHandle(
      result->CreateEmptyNode("fused_broadcast", ir::Node::Type::kOperation),
      local_scopes_, places_, nccl_ctxs_);
#else
  auto *op_handle = new FusedBroadcastOpHandle(
      result->CreateEmptyNode("fused_broadcast", ir::Node::Type::kOperation),
      local_scopes_, places_);
#endif
  result->Get<GraphOps>(kGraphOps).emplace_back(op_handle);

  for (size_t i = 0; i < places_.size(); ++i) {
    auto &p = places_[i];
    SetCommunicationContext(op_handle, p);
  }

  for (size_t dev_id = 0; dev_id < bcast_varnames.size(); ++dev_id) {
    for (auto &p_name : bcast_varnames[dev_id]) {
      auto *in =
          result->Get<GraphVars>(kGraphVars).at(dev_id).at(p_name).back();
      op_handle->AddInput(in);
      for (size_t out_dev_id = 0; out_dev_id < places_.size(); ++out_dev_id) {
        auto &p = places_[out_dev_id];
        auto &vars =
            result->Get<GraphVars>(kGraphVars).at(out_dev_id).at(p_name);
        auto *out_var = new VarHandle(
            result->CreateEmptyNode(p_name, ir::Node::Type::kVariable),
            vars.size(), out_dev_id, p_name, p);
        vars.emplace_back(out_var);
        op_handle->AddOutput(out_var);
      }
    }
  }
}

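// Creates a computation op handle for `node` on a single device.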
void MultiDevSSAGraphBuilderBase::CreateComputationalOp(ir::Graph *result,
                                                        ir::Node *node,
                                                        size_t dev_id) const {
  result->Get<GraphOps>(kGraphOps).emplace_back(
      new ComputationOpHandle(result->CreateOpNode(node->Op()),
                              local_scopes_[dev_id], places_[dev_id], dev_id));
  CreateOpHandleIOs(result, node, dev_id);
}

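// Inserts an all_reduce op handle that takes the latest version of `og` on
// every place as input and emits a new version on each.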
void MultiDevSSAGraphBuilderBase::CreateAllReduceOp(
    ir::Graph *result, const std::string &og) const {
#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32)
  result->Get<GraphOps>(kGraphOps).emplace_back(new AllReduceOpHandle(
      result->CreateEmptyNode("allreduce", ir::Node::Type::kOperation),
      local_scopes_, places_, nccl_ctxs_));
#else
  result->Get<GraphOps>(kGraphOps).emplace_back(new AllReduceOpHandle(
      result->CreateEmptyNode("allreduce", ir::Node::Type::kOperation),
      local_scopes_, places_));
#endif
  auto *op_handle = result->Get<GraphOps>(kGraphOps).back();

  for (size_t i = 0; i < places_.size(); ++i) {
    auto &p = places_[i];
    SetCommunicationContext(op_handle, p);
    auto &vars = result->Get<GraphVars>(kGraphVars)[i][og];
    PADDLE_ENFORCE(!vars.empty());
    auto &prev_grad = vars.back();
    op_handle->AddInput(prev_grad);

    auto var =
        new VarHandle(result->CreateEmptyNode(og, ir::Node::Type::kVariable),
                      vars.size(), i, og, p);
    vars.emplace_back(var);
    op_handle->AddOutput(var);
  }
}

void MultiDevSSAGraphBuilderBase::CreateScaleLossGradOp(
    ir::Graph *result, const std::string &loss_grad_name,
    ir::Node *out_var_node, size_t loss_scale,
    proto::VarType::Type dtype) const {
  for (size_t i = 0; i < places_.size(); ++i) {
    auto *dev_ctx = platform::DeviceContextPool::Instance().Get(places_[i]);
    auto *op_handle = new ScaleLossGradOpHandle(
        result->CreateEmptyNode("scale_loss_grad", ir::Node::Type::kOperation),
        loss_scale, local_scopes_[i], places_[i], dev_ctx, dtype);
    result->Get<GraphOps>(kGraphOps).emplace_back(op_handle);

    // FIXME: Currently ScaleLossGradOp only use device_count as scale
    // factor. So it does not depend on any other operators.
    // VarHandle *loss = GetVarHandle(loss_var_name, place);
    // loss->pending_ops_.emplace_back(op_handle);
    // op_handle->inputs_.emplace_back(loss);

    CreateOpOutput(result, op_handle,
                   result->CreateVarNode(out_var_node->Var()), places_[i], i);
  }
}

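// Replicates `node` as a computation op handle on every place.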
void MultiDevSSAGraphBuilderBase::CreateComputationalOps(
    ir::Graph *result, ir::Node *node, size_t num_places) const {
  for (size_t scope_idx = 0; scope_idx < num_places; ++scope_idx) {
    auto p = places_[scope_idx];
    auto s = local_scopes_[scope_idx];
    result->Get<GraphOps>(kGraphOps).emplace_back(new ComputationOpHandle(
        result->CreateOpNode(node->Op()), s, p, scope_idx));
    CreateOpHandleIOs(result, node, scope_idx);
  }
}

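// Reduces the latest version of `og` from all places onto `dst_dev_id` and
// returns the resulting VarHandle.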
VarHandle *MultiDevSSAGraphBuilderBase::CreateReduceOp(
    ir::Graph *result, const std::string &og, size_t dst_dev_id) const {
#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32)
  result->Get<GraphOps>(kGraphOps).emplace_back(new ReduceOpHandle(
      result->CreateEmptyNode("reduce", ir::Node::Type::kOperation),
      local_scopes_, places_, nccl_ctxs_));
#else
  result->Get<GraphOps>(kGraphOps).emplace_back(new ReduceOpHandle(
      result->CreateEmptyNode("reduce", ir::Node::Type::kOperation),
      local_scopes_, places_));
#endif
  auto *op_handle = result->Get<GraphOps>(kGraphOps).back();

  for (size_t i = 0; i < places_.size(); ++i) {
    auto &p = places_[i];
    SetCommunicationContext(op_handle, p);
    auto &vars = result->Get<GraphVars>(kGraphVars)[i][og];
    PADDLE_ENFORCE(!vars.empty());
    auto &prev_grad = vars.back();
    op_handle->AddInput(prev_grad);
  }
  auto &vars = result->Get<GraphVars>(kGraphVars)[dst_dev_id][og];
  auto var =
      new VarHandle(result->CreateEmptyNode(og, ir::Node::Type::kVariable),
                    vars.size(), dst_dev_id, og, places_[dst_dev_id]);
  vars.emplace_back(var);
  op_handle->AddOutput(var);
  return var;
}

bool MultiDevSSAGraphBuilderBase::IsScaleLossOp(ir::Node *node) const {
  return boost::get<int>(
             node->Op()->GetAttr(OpProtoAndCheckerMaker::OpRoleAttrName())) ==
             (static_cast<int>(OpRole::kBackward) |
              static_cast<int>(OpRole::kLoss)) &&
         !loss_var_name_.empty();  // If loss_var is empty, this is test mode.
}

bool MultiDevSSAGraphBuilderBase::IsSparseGradient(
    const std::string &og) const {
  PADDLE_ENFORCE(all_vars_.count(og) != 0);
  if (all_vars_.at(og)->GetType() == proto::VarType::SELECTED_ROWS) {
    return true;
518
  }
  return false;
}

void AllReduceSSAGraphBuilder::InsertCollectiveOp(
    ir::Graph *result, const std::string &p_name,
    const std::string &g_name) const {
  if (IsSparseGradient(g_name)) {
    CreateReduceOp(result, g_name, 0);
    CreateBroadcastOp(result, g_name, 0);
  } else {
    CreateAllReduceOp(result, g_name);
  }
}

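// Returns the device a sharded variable was assigned to, falling back to the
// base name when `varname` carries the new-grad suffix; -1 if unknown.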
int BalanceVarSSAGraphBuilder::GetVarDeviceID(
    const std::string &varname) const {
  auto got = sharded_var_device_.find(varname);
  if (got == sharded_var_device_.end()) {
    auto pos = varname.find(framework::kNewGradSuffix);
    if (pos != std::string::npos) {
      got = sharded_var_device_.find(varname.substr(0, pos));
    }
  }
  return got == sharded_var_device_.end() ? -1 : got->second;
}

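// In reduce mode an optimize op must run on the device that owns its
// gradient; returns that device id, or -1 if the op is not pinned.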
int BalanceVarSSAGraphBuilder::GetOpDeviceID(ir::Node *node) const {
  if (strategy_.reduce_ != BuildStrategy::ReduceStrategy::kReduce) {
    return -1;
  }
  if (!OpHaveRole(*node, framework::OpRole::kOptimize)) {
    return -1;
  }
  auto param_grad = boost::get<std::vector<std::string>>(
      node->Op()->GetAttr(OpProtoAndCheckerMaker::OpRoleVarAttrName()));

  PADDLE_ENFORCE_EQ(param_grad.size(), 2U);
  int dev_id = GetVarDeviceID(param_grad[1]);
  PADDLE_ENFORCE_NE(dev_id, -1, "dev_id should not be -1.[%s, %s, %s]",
                    node->Op()->Type(), param_grad[0], param_grad[1]);
  return dev_id;
}

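// Greedy load balancing: picks the device currently holding the fewest
// elements (tracked in balance_vars_) and charges the new variables to it.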
size_t BalanceVarSSAGraphBuilder::GetAppropriateDeviceID(
    const std::vector<std::string> &var_names) const {
  int64_t numel_sum = 0;
  for (auto var_name : var_names) {
    if (all_vars_.find(var_name) == all_vars_.end()) continue;
    auto var_desc = all_vars_.at(var_name);
    PADDLE_ENFORCE_NOT_NULL(var_desc);
    auto dim = framework::make_ddim(var_desc->GetShape());
    int64_t numel = framework::product(dim);
    PADDLE_ENFORCE_GT(numel, 0);
    numel_sum += numel;
  }

  auto smallest =
      std::min_element(std::begin(balance_vars_), std::end(balance_vars_));
  size_t dev_id =
      static_cast<size_t>(std::distance(std::begin(balance_vars_), smallest));
  balance_vars_[dev_id] += numel_sum;
  return dev_id;
}

void BalanceVarSSAGraphBuilder::ResetState() const {
  balance_vars_.clear();
  sharded_var_device_.clear();

  balance_vars_.resize(places_.size(), 0);
}

void ReduceSSAGraphBuilder::Init() const {
  MultiDevSSAGraphBuilderBase::Init();
  ResetState();
}

void ReduceSSAGraphBuilder::ResetState() const {
  BalanceVarSSAGraphBuilder::ResetState();
  bcast_var_name_set_.clear();
  bcast_var_name_set_.resize(places_.size());
}

void ReduceSSAGraphBuilder::InsertCollectiveOp(
    ir::Graph *result, const std::string &p_name,
    const std::string &g_name) const {
  size_t cur_device_id = GetAppropriateDeviceID({g_name});
  CreateReduceOp(result, g_name, cur_device_id);
  sharded_var_device_.emplace(g_name, cur_device_id);
  bcast_var_name_set_[cur_device_id].emplace(p_name);
}

bool ReduceSSAGraphBuilder::DealWithSpecialOp(ir::Graph *result,
                                              ir::Node *node) const {
  int op_dev_id = BalanceVarSSAGraphBuilder::GetOpDeviceID(node);
  if (op_dev_id != -1) {
    // This op only runs on one specific device.
    CreateComputationalOp(result, node, op_dev_id);
    for (ir::Node *n : node->outputs) {
      sharded_var_device_.emplace(n->Name(), op_dev_id);
    }
    return true;
  }
  return false;
}

void ReduceSSAGraphBuilder::InsertPostprocessOps(ir::Graph *result) const {
  if (UseGPU()) {
    if (strategy_.fuse_broadcast_op_) {
      CreateFusedBroadcastOp(result, bcast_var_name_set_);
    } else {
      for (size_t dev_id = 0; dev_id < bcast_var_name_set_.size(); ++dev_id) {
        auto &to_bcast_set = bcast_var_name_set_[dev_id];
        for (auto &bcast_name : to_bcast_set) {
          CreateBroadcastOp(result, bcast_name, dev_id);
        }
      }
    }
  }
}

int ReduceSSAGraphBuilder::GetOpDeviceID(
    ir::Node *node,
    std::unordered_map<std::string, std::vector<ir::Node *>> *delay_ops) const {
  if (!OpHaveRole(*node, framework::OpRole::kOptimize)) {
    return -1;
  }

  auto param_grad = boost::get<std::vector<std::string>>(
      node->Op()->GetAttr(OpProtoAndCheckerMaker::OpRoleVarAttrName()));

  PADDLE_ENFORCE_EQ(param_grad.size(), 2U);
  int dev_id = GetVarDeviceID(param_grad[1]);

  if (dev_id == -1) {
    (*delay_ops)[param_grad[1]].push_back(node);
    return -2;
  }
  return dev_id;
}

std::vector<ir::Node *> ReduceSSAGraphBuilder::SortOperations(
    const ir::Graph &graph) const {
  std::vector<ir::Node *> sorted_ops = ir::TopologySortOperations(graph);
  return SortForReduceMode(sorted_ops);
}

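// Reorders the topologically sorted ops so that each optimize op is delayed
// until the device of the gradient it consumes has been decided.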
std::vector<ir::Node *> ReduceSSAGraphBuilder::SortForReduceMode(
    const std::vector<ir::Node *> &topo_ops) const {
  std::vector<ir::Node *> sorted_ops;
  std::unordered_map<std::string, std::vector<ir::Node *>> delayed_op;
  sorted_ops.reserve(topo_ops.size());
  ResetState();

  auto insert_delayed_op = [&](const std::string &var_name, int dev_id) {
    sharded_var_device_.emplace(var_name, dev_id);
    if (delayed_op.count(var_name)) {
      auto &ops = delayed_op.at(var_name);
      sorted_ops.insert(sorted_ops.end(), ops.begin(), ops.end());
      delayed_op.at(var_name).clear();
    }
  };

  for (ir::Node *node : topo_ops) {
    int op_dev_id = GetOpDeviceID(node, &delayed_op);
    if (op_dev_id > -1) {
      // This op only runs on one specific device.
      sorted_ops.emplace_back(node);
      for (ir::Node *n : node->outputs) {
        insert_delayed_op(n->Name(), op_dev_id);
      }
    } else if (op_dev_id == -1) {
      // This op runs on all devices, and its outputs may contain parameter
      // gradients.
      sorted_ops.emplace_back(node);
      bool is_bk_op =
          static_cast<bool>(boost::get<int>(node->Op()->GetAttr(
                                OpProtoAndCheckerMaker::OpRoleAttrName())) &
                            static_cast<int>(OpRole::kBackward));
      if (!is_bk_op) continue;
      // Currently, we assume that once gradient is generated, it can be
      // broadcast, and each gradient is only broadcast once.
      std::vector<std::string> backward_vars;
      try {
        backward_vars =
            boost::get<std::vector<std::string>>(node->Op()->GetNullableAttr(
                OpProtoAndCheckerMaker::OpRoleVarAttrName()));
      } catch (boost::bad_get e) {
      }
      PADDLE_ENFORCE_EQ(backward_vars.size() % 2, 0);

      for (size_t i = 0; i < backward_vars.size(); i += 2) {
        auto &g_name = backward_vars[i + 1];
        size_t cur_device_id = GetAppropriateDeviceID({g_name});
        insert_delayed_op(g_name, static_cast<int>(cur_device_id));
      }
    } else if (op_dev_id == -2) {
      // The op that this op depends on has not been generated yet.
    }
  }

  PADDLE_ENFORCE_EQ(sorted_ops.size(), topo_ops.size());

  ResetState();
  return sorted_ops;
}

void DistSSAGraphBuilder::Init() const {
  MultiDevSSAGraphBuilderBase::Init();
  ResetState();
}

void DistSSAGraphBuilder::ResetState() const {
  BalanceVarSSAGraphBuilder::ResetState();
  bcast_var_name_set_.clear();
  bcast_var_name_set_.resize(places_.size());
}

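// Handles RPC ops, distributed-training ops and optimize ops pinned to a
// single device; returns true if `node` was consumed here.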
bool DistSSAGraphBuilder::DealWithSpecialOp(ir::Graph *result,
                                            ir::Node *node) const {
  bool insert_op = false;
  if (OpHaveRole(*node, OpRole::kRPC)) {
    // in async_mode, each graph will send its own gradient.
    if (strategy_.async_mode_ && node->Op()->Type() == "send") {
      return false;
    }
    int op_dev_id = CreateRPCOp(result, node);
    PADDLE_ENFORCE(op_dev_id != -1,
                   "Can not schedule the RPC operator to the right place.");
    if (node->Op()->Type() == "recv") {
      auto recv_vars_attr =
          boost::get<std::vector<std::string>>(node->Op()->GetNullableAttr(
              OpProtoAndCheckerMaker::OpRoleVarAttrName()));
      PADDLE_ENFORCE(recv_vars_attr.size() == 2UL);  // [parameter, gradient]
      if (recv_vars_attr[0].find(".block") == std::string::npos) {
        bcast_var_name_set_[op_dev_id].emplace(recv_vars_attr[0]);
      }
    }
    insert_op = true;
    need_broadcast_var_ = true;
  } else if (OpHaveRole(*node, OpRole::kDist)) {
    // in async_mode, each graph will send its own gradient, so there is no
    // need to merge gradients.
    if (strategy_.async_mode_ && node->Op()->Type() != "concat") {
      return false;
    }
    int op_dev_id = CreateDistTrainOp(result, node);
    if (node->Op()->Type() == "concat") {
      // the inputs (blocks of the parameter) of concat are on different
      // devices; the output (the parameter) will be on one device.
      auto origin_param_name = node->Op()->OutputArgumentNames()[0];
      bcast_var_name_set_[op_dev_id].emplace(origin_param_name);
    }
    insert_op = true;
  } else {
    int op_dev_id = GetOpDeviceID(node);
    if (op_dev_id != -1) {  // This op only runs on one specific device.
      // optimize op will be processed here.
      CreateComputationalOp(result, node, op_dev_id);
      for (ir::Node *n : node->outputs) {
        sharded_var_device_.emplace(n->Name(), op_dev_id);
      }
      insert_op = true;
    }
  }
  return insert_op;
}

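// Feeds the newest op handle the latest version of each input variable from
// every place where that variable exists.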
void SetOpInputsAllPlaces(ir::Graph *result, ir::Node *node, int num_places) {
  auto *op_handle = result->Get<GraphOps>(kGraphOps).back();
  for (ir::Node *input : node->inputs) {
    VarHandle *var = nullptr;
    for (int place_offset = 0; place_offset < num_places; ++place_offset) {
      auto &var_holders = result->Get<GraphVars>(kGraphVars)[place_offset];
      auto &var_holder = var_holders[input->Name()];
      if (!var_holder.empty()) {
        var = *var_holder.rbegin();
        op_handle->AddInput(var);
      }
    }
  }
}

// Create RPC-related op handles that connect their in ops and out ops.
int DistSSAGraphBuilder::CreateRPCOp(ir::Graph *result, ir::Node *node) const {
  int op_dev_id = -1;
  if (node->Op()->Type() == "send") {
    // TODO(paddle-dev): getting the first var is not safe.
    op_dev_id = GetVarDeviceID(node->inputs[0]->Name());
    PADDLE_ENFORCE(!ir::IsControlDepVar(*node->inputs[0]),
                   "This hack no longer holds, please fix.");
    // the variable name which contains .block means it was split by
    // split_byref op
    if (strategy_.reduce_ == BuildStrategy::ReduceStrategy::kAllReduce &&
        node->inputs[0]->Name().find(".block") == std::string::npos) {
      std::vector<std::string> input_var_names;
      for (ir::Node *n : node->inputs) {
        input_var_names.push_back(n->Name());
      }
      auto send_param_grad = boost::get<std::vector<std::string>>(
          node->Op()->GetAttr(OpProtoAndCheckerMaker::OpRoleVarAttrName()));
      PADDLE_ENFORCE_EQ(send_param_grad.size(), 2U);
      op_dev_id = GetAppropriateDeviceID({send_param_grad[1]});
      VLOG(10) << "send grad " << input_var_names[0] << " origin "
               << send_param_grad[1] << " place: " << op_dev_id;
      for (auto &varname : input_var_names) {
        sharded_var_device_.emplace(varname, op_dev_id);
      }
      sharded_var_device_.emplace(send_param_grad[1], op_dev_id);
    }
  } else if (node->Op()->Type() == "recv") {
    std::vector<std::string> output_var_names;
    for (ir::Node *n : node->outputs) {
      output_var_names.push_back(n->Name());
    }
    auto recv_param_grad = boost::get<std::vector<std::string>>(
        node->Op()->GetAttr(OpProtoAndCheckerMaker::OpRoleVarAttrName()));
    if (recv_param_grad.size() == 2U && !strategy_.async_mode_) {
      op_dev_id = GetVarDeviceID(recv_param_grad[1]);
      VLOG(10) << "recv param " << recv_param_grad[0]
               << " get grad place: " << recv_param_grad[1]
               << " place: " << op_dev_id;
    } else {
      op_dev_id = GetAppropriateDeviceID(output_var_names);
    }
    for (auto &varname : output_var_names) {
      sharded_var_device_.emplace(varname, op_dev_id);
    }
  } else {
    // send_barrier, fetch_barrier will run on place 0.
    op_dev_id = 0;
  }

  PADDLE_ENFORCE(op_dev_id != -1, "can not find the right place for rpc op: %s",
                 node->Op()->Type());
  result->Get<GraphOps>(kGraphOps).emplace_back(new RPCOpHandle(
      result->CreateOpNode(node->Op()), *node->Op(), local_scopes_[op_dev_id],
      node->Op()->Type(), places_[op_dev_id]));

  if (node->Op()->Type() == "send") {
    CreateOpHandleIOs(result, node, op_dev_id);
  } else {
    // send_barrier, recv, fetch_barrier's inputs are deps var, get them from
    // all places
    auto p = places_[op_dev_id];
    auto *op_handle = result->Get<GraphOps>(kGraphOps).back();
    op_handle->SetDeviceContext(p,
                                platform::DeviceContextPool::Instance().Get(p));

    SetOpInputsAllPlaces(result, node, places_.size());
    for (ir::Node *output : node->outputs) {
      int outvar_dev_id = op_dev_id;
      if (node->Op()->Type() == "fetch_barrier") {
        outvar_dev_id = GetVarDeviceID(output->Name());
        PADDLE_ENFORCE_NE(outvar_dev_id, -1, "output name %s", output->Name());
      }
      p = places_[outvar_dev_id];
      ir::Node *new_node = nullptr;
      if (output->Var()) {
        new_node = result->CreateVarNode(output->Var());
      } else {
        new_node =
            result->CreateEmptyNode(output->Name(), ir::Node::Type::kVariable);
      }
      CreateOpOutput(result, op_handle, new_node, p, outvar_dev_id);
    }
  }
  return op_dev_id;
}

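// Places distributed-training helper ops (split_byref, split_selected_rows,
// split_ids, concat) on one device and records the sharding of their outputs.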
int DistSSAGraphBuilder::CreateDistTrainOp(ir::Graph *result,
                                           ir::Node *node) const {
  int op_dev_id = -1;
  std::vector<std::string> input_var_names;
  std::vector<std::string> output_var_names;
  for (ir::Node *input : node->inputs) {
    input_var_names.push_back(input->Name());
  }
  for (ir::Node *output : node->outputs) {
    output_var_names.push_back(output->Name());
  }

  if (node->Op()->Type() == "split_byref" ||
      node->Op()->Type() == "split_selected_rows" ||
      node->Op()->Type() == "split_ids") {
    // TODO(paddle-dev): getting the first var is not safe.
    op_dev_id = GetVarDeviceID(input_var_names[0]);
    if (strategy_.reduce_ == BuildStrategy::ReduceStrategy::kAllReduce) {
      op_dev_id = GetAppropriateDeviceID(input_var_names);
      for (auto &varname : input_var_names) {
        sharded_var_device_.emplace(varname, op_dev_id);
      }
    }
    for (auto &varname : output_var_names) {
      sharded_var_device_.emplace(varname, op_dev_id);
    }
  } else if (node->Op()->Type() == "concat") {
    op_dev_id = GetVarDeviceID(input_var_names[0]);
    for (auto &varname : output_var_names) {
      sharded_var_device_.emplace(varname, op_dev_id);
    }
  } else {
    LOG(ERROR) << "got unexpected dist op: " << node->Op()->Type();
    PADDLE_THROW(
        "the distribute training related op should be in [split_byref, "
        "concat].");
  }

  PADDLE_ENFORCE(op_dev_id != -1,
                 "can not find right place for distributed op: %s",
                 node->Op()->Type());

  CreateComputationalOp(result, node, op_dev_id);
  return op_dev_id;
}

void DistSSAGraphBuilder::InsertCollectiveOp(ir::Graph *result,
                                             const std::string &p_name,
                                             const std::string &g_name) const {
  // aggregate the gradient across devices according to the reduce strategy
  size_t cur_device_id = 0;
  switch (strategy_.reduce_) {
    case BuildStrategy::ReduceStrategy::kReduce:
      cur_device_id = GetAppropriateDeviceID({g_name});
      CreateReduceOp(result, g_name, cur_device_id);
      sharded_var_device_.emplace(g_name, cur_device_id);
      break;
    case BuildStrategy::ReduceStrategy::kAllReduce:
      if (IsSparseGradient(g_name)) {
        CreateReduceOp(result, g_name, 0);
        CreateBroadcastOp(result, g_name, 0);
      } else {
        CreateAllReduceOp(result, g_name);
      }
      break;
    default:
      LOG(FATAL) << "Unknown reduce strategy.";
      break;
  }
}

void DistSSAGraphBuilder::InsertPostprocessOps(ir::Graph *result) const {
  if (need_broadcast_var_ ||
      (UseGPU() &&
       strategy_.reduce_ == BuildStrategy::ReduceStrategy::kReduce)) {
    if (strategy_.fuse_broadcast_op_) {
      CreateFusedBroadcastOp(result, bcast_var_name_set_);
    } else {
      for (size_t dev_id = 0; dev_id < bcast_var_name_set_.size(); ++dev_id) {
        auto &to_bcast_set = bcast_var_name_set_[dev_id];
        for (auto &bcast_name : to_bcast_set) {
          CreateBroadcastOp(result, bcast_name, dev_id);
        }
      }
    }
  }
}

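// Global registry of multi-devices pass names, filled via
// REGISTER_MULTI_DEVICES_PASS below.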
std::unordered_set<std::string> &MultiDevSSAGraphBuilder() {
  static std::unordered_set<std::string> regs;
  return regs;
}

static int MultiDevSSAGraphBuilderRegister(const std::string &builder_mode) {
  MultiDevSSAGraphBuilder().insert(builder_mode);
  return 0;
}

}  // namespace details
}  // namespace framework
}  // namespace paddle

#define REGISTER_MULTI_DEVICES_PASS(pass_name, pass_class)                     \
  STATIC_ASSERT_GLOBAL_NAMESPACE(                                              \
      _reg_ssa_graph_builder_##pass_name,                                      \
      "REGISTER_MULTI_DEVICES_PASS must be called in global namespace.");      \
  int _reg_ssa_graph_builder_entry_##pass_name =                               \
      paddle::framework::details::MultiDevSSAGraphBuilderRegister(#pass_name); \
  REGISTER_PASS(pass_name, pass_class)                                         \
      .RequirePassAttr(paddle::framework::details::kLossVarName)               \
      .RequirePassAttr(paddle::framework::details::kPlaces)                    \
      .RequirePassAttr(paddle::framework::details::kLocalScopes)               \
      .RequirePassAttr(paddle::framework::details::kStrategy)                  \
      .RequirePassAttr(paddle::framework::details::kNRanks)

REGISTER_MULTI_DEVICES_PASS(reduce_mode_multi_devices_pass,
                            paddle::framework::details::ReduceSSAGraphBuilder);
REGISTER_MULTI_DEVICES_PASS(
    allreduce_mode_multi_devices_pass,
    paddle::framework::details::AllReduceSSAGraphBuilder);
REGISTER_MULTI_DEVICES_PASS(dist_multi_devices_pass,
                            paddle::framework::details::DistSSAGraphBuilder);
REGISTER_MULTI_DEVICES_PASS(async_multi_devices_pass,
                            paddle::framework::details::AsyncSSAGraphBuilder);