//   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/fluid/framework/details/multi_devices_graph_pass.h"
#include <algorithm>
#include <fstream>
#include <memory>
#include <string>
#include <unordered_map>
#include <unordered_set>
#include <utility>
#include <vector>
#include "paddle/fluid/framework/details/all_reduce_op_handle.h"
#include "paddle/fluid/framework/details/broadcast_op_handle.h"
#include "paddle/fluid/framework/details/computation_op_handle.h"
#include "paddle/fluid/framework/details/fetch_barrier_op_handle.h"
#include "paddle/fluid/framework/details/fused_broadcast_op_handle.h"
#include "paddle/fluid/framework/details/reduce_op_handle.h"
#include "paddle/fluid/framework/details/rpc_op_handle.h"
#include "paddle/fluid/framework/details/scale_loss_grad_op_handle.h"
#include "paddle/fluid/framework/ir/graph_helper.h"
#include "paddle/fluid/framework/ir/node.h"
#include "paddle/fluid/framework/op_info.h"
#include "paddle/fluid/framework/scope.h"
#include "paddle/fluid/operators/math/math_function.h"

namespace paddle {
namespace framework {
namespace details {

namespace {
// TODO(panyx0718): Clean this up as well.
// All operators. NOTE that even though we use a vector here, the operators
// are unordered.
typedef std::vector<OpHandleBase *> GraphOps;
const char kGraphOps[] = "ops";

bool OpHaveRole(const ir::Node &node, const framework::OpRole &role) {
  return boost::get<int>(
             node.Op()->GetAttr(OpProtoAndCheckerMaker::OpRoleAttrName())) ==
         static_cast<int>(role);
}

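// Adds control-dependency (dummy) variables so that every op reading an old
// version of a variable becomes a dependency of the op that writes the next
// version, serializing write-after-read hazards in the SSA graph.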
void PolishGraphToSupportDataHazards(ir::Graph *graph) {
  for (auto &var_map : graph->Get<GraphVars>(kGraphVars)) {
    for (auto &name_pair : var_map) {
      if (name_pair.second.size() <= 1) {
        continue;
      }
      auto it_new = name_pair.second.rbegin();
      auto it_old = name_pair.second.rbegin();
      ++it_old;
      for (; it_old != name_pair.second.rend(); it_new = it_old, ++it_old) {
        OpHandleBase *write_op = (*it_new)->GeneratedOp();
        const auto &read_ops = (*it_old)->PendingOps();

        for (auto *read_op : read_ops) {
          // Manually add a dependency var from read_op to write_op;
          if (read_op == write_op) {
            // The read op and the write op are the same; skip.
            continue;
          }
          bool has_dep = false;
          for (auto *r_out : read_op->Outputs()) {
            for (auto *w_in : write_op->Inputs()) {
              if (r_out->Node() == w_in->Node()) {
                has_dep = true;
                break;
              }
            }
          }
          if (has_dep) continue;

          auto *dep_var = new DummyVarHandle(graph->CreateControlDepVar());
          read_op->AddOutput(dep_var);
          write_op->AddInput(dep_var);
          graph->Get<GraphDepVars>(kGraphDepVars).emplace(dep_var);
        }
      }
    }
  }
}

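// Returns the newest VarHandle of node's variable on `place`; if the variable
// has no version on that place yet, version 0 is created.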
VarHandle *CreateOrGetLatestVarHandle(ir::Graph *graph, ir::Node *node,
                                      const platform::Place &place,
                                      size_t place_offset) {
  auto &var_holders = graph->Get<GraphVars>(kGraphVars)[place_offset];
  auto &var_holder = var_holders[node->Name()];
  VarHandle *var = nullptr;
  if (var_holder.empty()) {
    if (node->Var()) {
      var = new VarHandle(graph->CreateVarNode(node->Var()), 0, place_offset,
                          node->Name(), place);
    } else {
      var = new VarHandle(
          graph->CreateEmptyNode(node->Name(), ir::Node::Type::kVariable), 0,
          place_offset, node->Name(), place);
    }
    var_holder.emplace_back(var);
  } else {
    var = *var_holder.rbegin();
  }
  return var;
}

void CreateOpOutput(ir::Graph *graph, OpHandleBase *op_handle,
                    ir::Node *new_node, const platform::Place &place,
                    size_t place_offset) {
  auto &vars =
      graph->Get<GraphVars>(kGraphVars)[place_offset][new_node->Name()];
  size_t version = vars.size();
  auto var =
      new VarHandle(new_node, version, place_offset, new_node->Name(), place);
  vars.emplace_back(var);
  op_handle->AddOutput(var);
}

void AddOutputToLeafOps(ir::Graph *graph) {
  for (auto &op : graph->Get<GraphOps>(kGraphOps)) {
    if (!op->Outputs().empty()) {
      continue;
    }
    auto *dummy_leaf = new DummyVarHandle(graph->CreateControlDepVar());
    graph->Get<GraphDepVars>(kGraphDepVars).emplace(dummy_leaf);
    op->AddOutput(dummy_leaf);
  }
}
}  // namespace

void MultiDevSSAGraphBuilderBase::CheckGraph(const ir::Graph &graph) const {}

void MultiDevSSAGraphBuilderBase::Init() const {
  all_vars_.clear();

  loss_var_name_ = Get<const std::string>(kLossVarName);
  VLOG(10) << "Init MultiDevSSAGraphBuilder, loss name: " << loss_var_name_;
  places_ = Get<const std::vector<platform::Place>>(kPlaces);
  local_scopes_ = Get<const std::vector<Scope *>>(kLocalScopes);
  strategy_ = Get<const BuildStrategy>(kStrategy);
#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32)
  nccl_ctxs_ = &Get<platform::NCCLContextMap>(kNCCLCtxs);
#endif
  PADDLE_ENFORCE_EQ(places_.size(), local_scopes_.size());
}

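// Entry point of the pass. Ops are visited in sorted order: computation ops
// are replicated on every place, the scale-loss-grad op marks the boundary
// between forward and backward, and collective ops are inserted for parameter
// gradients when nranks > 1. Finally data-hazard control deps and dummy leaf
// outputs are added.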
std::unique_ptr<ir::Graph> MultiDevSSAGraphBuilderBase::ApplyImpl(
    std::unique_ptr<ir::Graph> graph) const {
  Init();
  CheckGraph(*graph);
  std::vector<ir::Node *> sorted_ops = SortOperations(*graph);

  auto nodes = graph->ReleaseNodes();
  ir::Graph &result = *graph;

  for (auto &node : nodes) {
    if (node->IsVar() && node->Var()) {
      all_vars_.emplace(node->Name(), node->Var());
    }
  }

  // We cannot invoke resize here due to a bug in GCC 4.8.
  result.Set(kGraphVars, new GraphVars(places_.size()));
  result.Set(kGraphDepVars, new GraphDepVars);
  result.Set(kGraphOps, new GraphOps);

  bool is_forwarding = true;

  for (ir::Node *node : sorted_ops) {
    if (DealWithSpecialOp(&result, node)) {
      continue;
    } else {
      // This op runs on all devices
      if (IsScaleLossOp(node)) {
        // user can customize loss@grad if not use_default_grad_scale_
        InsertScaleLossGradOp(&result, node);
        // This assumes the backward generating code will ensure IsScaleLossOp
        // is true only for the op that scale the final scalar loss.
        // It also assumes backward op will always follow the forward op in
        // the block.
        is_forwarding = false;
      } else {
        CreateComputationalOps(&result, node, places_.size());
      }

      // Insert collective ops if nranks > 1
      if (!is_forwarding && Get<size_t>(kNRanks) > 1) {
        try {
          bool is_bk_op =
              static_cast<bool>(boost::get<int>(node->Op()->GetAttr(
                                    OpProtoAndCheckerMaker::OpRoleAttrName())) &
                                static_cast<int>(OpRole::kBackward));
          if (!is_bk_op) continue;

          // Currently, we assume that once a gradient is generated, it can be
          // broadcast, and that each gradient is broadcast only once.
          auto backward_vars =
              boost::get<std::vector<std::string>>(node->Op()->GetNullableAttr(
                  OpProtoAndCheckerMaker::OpRoleVarAttrName()));
          PADDLE_ENFORCE_EQ(backward_vars.size() % 2, 0);
          for (size_t i = 0; i < backward_vars.size(); i += 2) {
            auto &p_name = backward_vars[i];
            auto &g_name = backward_vars[i + 1];
            VLOG(10) << "Bcast " << g_name << " for parameter " << p_name
                     << " op_type " << node->Op()->Type();
            if (NeedCollectiveForGrad(g_name, sorted_ops)) {
              InsertCollectiveOp(&result, p_name, g_name);
            }
          }
        } catch (boost::bad_get e) {
        }
      }
    }
  }

  InsertPostprocessOps(&result);

  /*
  Dependency graph has been constructed. However, there are still data
  hazards that need to be handled.
  */
  PolishGraphToSupportDataHazards(&result);

  /*
   * Only variables should be the leaves of graph.
   */
  AddOutputToLeafOps(&result);

  result.Erase(kGraphOps);
  return graph;
}

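// Picks the loss-gradient scale from the gradient scale strategy (1 for kOne,
// nranks for kCoeffNumDevice, 0 for kCustomized where the user supplies
// loss@grad) and, when the scale is non-zero, creates a ScaleLossGradOp on
// every place.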
void MultiDevSSAGraphBuilderBase::InsertScaleLossGradOp(
    ir::Graph *result, const ir::Node *node) const {
  // user can customize loss@grad if not use_default_grad_scale_
  size_t loss_scale = 0;
  switch (this->strategy_.gradient_scale_) {
    case BuildStrategy::GradientScaleStrategy::kOne:
      loss_scale = 1;
      break;
    case BuildStrategy::GradientScaleStrategy::kCoeffNumDevice:
      loss_scale = Get<size_t>(kNRanks);
      break;
    case BuildStrategy::GradientScaleStrategy::kCustomized:
      loss_scale = 0;
      break;
    default:
      LOG(FATAL) << "Unknown gradient scale strategy.";
      break;
  }

  if (loss_scale) {
    // TODO(paddle-dev): Why is there no input for this op_handle?
    auto loss_grad_name = node->Op()->OutputArgumentNames()[0];
    auto out_dtype = this->all_vars_.at(loss_grad_name)->GetDataType();
    this->CreateScaleLossGradOp(result, loss_grad_name, node->outputs[0],
                                loss_scale, out_dtype);
  }
}

bool MultiDevSSAGraphBuilderBase::DealWithSpecialOp(ir::Graph *result,
                                                    ir::Node *node) const {
  return false;
}

std::vector<ir::Node *> MultiDevSSAGraphBuilderBase::SortOperations(
    const ir::Graph &graph) const {
  return ir::TopologySortOperations(graph);
}

bool MultiDevSSAGraphBuilderBase::UseGPU() const {
  bool use_gpu = false;
#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32)
  use_gpu = nccl_ctxs_ != nullptr;
#endif
  return use_gpu;
}

bool MultiDevSSAGraphBuilderBase::NeedCollectiveForGrad(
    const std::string &grad_name, std::vector<ir::Node *> ops) const {
  // if we have allreduce_op for current gradient variable in the graph,
  // then we don't need to add allreduce_op_handle for this gradient
  // NOTE: This is for the case that all gradients should add collective ops
  for (auto *node : ops) {
    if (node->Op()->Type() != "allreduce") continue;
    for (auto in_name : node->Op()->InputArgumentNames()) {
      if (in_name == grad_name) {
        return false;
      }
    }
  }
  return true;
}

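// Wires the most recently created op handle to its input/output VarHandles on
// place `place_id`.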
void MultiDevSSAGraphBuilderBase::CreateOpHandleIOs(ir::Graph *result,
                                                    ir::Node *node,
                                                    size_t place_id) const {
  auto p = places_[place_id];
  auto *op_handle = result->Get<GraphOps>(kGraphOps).back();
  op_handle->SetDeviceContext(p,
                              platform::DeviceContextPool::Instance().Get(p));

  for (ir::Node *input : node->inputs) {
    VarHandle *var = CreateOrGetLatestVarHandle(result, input, p, place_id);
    op_handle->AddInput(var);
  }

  for (ir::Node *output : node->outputs) {
    ir::Node *new_node = nullptr;
    if (output->Var()) {
      new_node = result->CreateVarNode(output->Var());
    } else {
      new_node =
          result->CreateEmptyNode(output->Name(), ir::Node::Type::kVariable);
    }
    CreateOpOutput(result, op_handle, new_node, p, place_id);
  }
}

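// Assigns the default device context to a collective op handle only when no
// NCCL context map is available (or in CPU-only builds); otherwise the handle
// is expected to take its context from nccl_ctxs_.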
void MultiDevSSAGraphBuilderBase::SetCommunicationContext(
    OpHandleBase *op_handle, const platform::Place &p) const {
#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32)
  if (nccl_ctxs_ == nullptr) {
    op_handle->SetDeviceContext(p,
                                platform::DeviceContextPool::Instance().Get(p));
  }
#else
  op_handle->SetDeviceContext(p,
                              platform::DeviceContextPool::Instance().Get(p));
#endif
}

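// Broadcasts the latest version of `p_name` from device `src_dev_id` to all
// places, appending a new VarHandle version on each of them.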
void MultiDevSSAGraphBuilderBase::CreateBroadcastOp(ir::Graph *result,
                                                    const std::string &p_name,
                                                    size_t src_dev_id) const {
#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32)
  auto *op_handle = new BroadcastOpHandle(
      result->CreateEmptyNode("broadcast", ir::Node::Type::kOperation),
      local_scopes_, places_, nccl_ctxs_);
#else
  auto *op_handle = new BroadcastOpHandle(
      result->CreateEmptyNode("broadcast", ir::Node::Type::kOperation),
      local_scopes_, places_);
#endif
  result->Get<GraphOps>(kGraphOps).emplace_back(op_handle);

  auto *in =
      result->Get<GraphVars>(kGraphVars).at(src_dev_id).at(p_name).back();
  op_handle->AddInput(in);

  for (size_t i = 0; i < places_.size(); ++i) {
    auto &p = places_[i];
    SetCommunicationContext(op_handle, p);
    auto &vars = result->Get<GraphVars>(kGraphVars).at(i).at(p_name);
    auto *out_var = new VarHandle(
        result->CreateEmptyNode(p_name, ir::Node::Type::kVariable), vars.size(),
        i, p_name, p);
    vars.emplace_back(out_var);
    op_handle->AddOutput(out_var);
  }
}

void MultiDevSSAGraphBuilderBase::CreateFusedBroadcastOp(
    ir::Graph *result,
    const std::vector<std::unordered_set<std::string>> &bcast_varnames) const {
#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32)
  auto *op_handle = new FusedBroadcastOpHandle(
      result->CreateEmptyNode("fused_broadcast", ir::Node::Type::kOperation),
      local_scopes_, places_, nccl_ctxs_);
#else
  auto *op_handle = new FusedBroadcastOpHandle(
      result->CreateEmptyNode("fused_broadcast", ir::Node::Type::kOperation),
      local_scopes_, places_);
#endif
  result->Get<GraphOps>(kGraphOps).emplace_back(op_handle);

  for (size_t i = 0; i < places_.size(); ++i) {
    auto &p = places_[i];
    SetCommunicationContext(op_handle, p);
  }

  for (size_t dev_id = 0; dev_id < bcast_varnames.size(); ++dev_id) {
    for (auto &p_name : bcast_varnames[dev_id]) {
      auto *in =
          result->Get<GraphVars>(kGraphVars).at(dev_id).at(p_name).back();
      op_handle->AddInput(in);
      for (size_t out_dev_id = 0; out_dev_id < places_.size(); ++out_dev_id) {
        auto &p = places_[out_dev_id];
        auto &vars =
            result->Get<GraphVars>(kGraphVars).at(out_dev_id).at(p_name);
        auto *out_var = new VarHandle(
            result->CreateEmptyNode(p_name, ir::Node::Type::kVariable),
            vars.size(), out_dev_id, p_name, p);
        vars.emplace_back(out_var);
        op_handle->AddOutput(out_var);
      }
    }
  }
}

void MultiDevSSAGraphBuilderBase::CreateComputationalOp(ir::Graph *result,
                                                        ir::Node *node,
                                                        int dev_id) const {
  result->Get<GraphOps>(kGraphOps).emplace_back(
      new ComputationOpHandle(result->CreateOpNode(node->Op()),
                              local_scopes_[dev_id], places_[dev_id], dev_id));
  CreateOpHandleIOs(result, node, dev_id);
}

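// Inserts an all-reduce over the latest version of gradient `og` on every
// place. In parallel-graph mode one handle is created per device; otherwise a
// single handle spans all local scopes and places.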
void MultiDevSSAGraphBuilderBase::CreateAllReduceOp(ir::Graph *result,
                                                    const std::string &og,
                                                    bool is_encoded) const {
  OpHandleBase *op_handle = nullptr;

  auto append_allreduce_op = [&](
      const std::vector<Scope *> &scopes,
      const std::vector<platform::Place> &places) -> OpHandleBase * {
#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32)
    result->Get<GraphOps>(kGraphOps).emplace_back(new AllReduceOpHandle(
        result->CreateEmptyNode("allreduce", ir::Node::Type::kOperation),
        scopes, places, nccl_ctxs_, is_encoded,
        static_cast<int>(strategy_.trainers_endpoints_.size()) *
            places_.size()));
#else
    result->Get<GraphOps>(kGraphOps).emplace_back(new AllReduceOpHandle(
        result->CreateEmptyNode("allreduce", ir::Node::Type::kOperation),
        scopes, places));
#endif
    return result->Get<GraphOps>(kGraphOps).back();
  };

  if (!strategy_.enable_parallel_graph_)
    op_handle = append_allreduce_op(local_scopes_, places_);

  for (size_t i = 0; i < places_.size(); ++i) {
    if (strategy_.enable_parallel_graph_) {
      op_handle = append_allreduce_op({local_scopes_[i]}, {places_[i]});
    }

    SetCommunicationContext(op_handle, places_[i]);
    auto &vars = result->Get<GraphVars>(kGraphVars)[i][og];
    PADDLE_ENFORCE(!vars.empty());
    auto &prev_grad = vars.back();
    op_handle->AddInput(prev_grad);
    VLOG(10) << "all_reduce_op_handle add input " << prev_grad->DebugString();

    auto var =
        new VarHandle(result->CreateEmptyNode(og, ir::Node::Type::kVariable),
                      vars.size(), i, og, places_[i]);
    vars.emplace_back(var);
    op_handle->AddOutput(var);
    VLOG(10) << "all_reduce_op_handle add output " << og
             << ", handle:" << var->DebugString();
  }
}

void MultiDevSSAGraphBuilderBase::CreateScaleLossGradOp(
    ir::Graph *result, const std::string &loss_grad_name,
    ir::Node *out_var_node, size_t loss_scale,
    proto::VarType::Type dtype) const {
  for (size_t i = 0; i < places_.size(); ++i) {
    auto *dev_ctx = platform::DeviceContextPool::Instance().Get(places_[i]);
    auto *op_handle = new ScaleLossGradOpHandle(
        result->CreateEmptyNode("scale_loss_grad", ir::Node::Type::kOperation),
        loss_scale, local_scopes_[i], places_[i], dev_ctx, dtype);
    result->Get<GraphOps>(kGraphOps).emplace_back(op_handle);

    // FIXME: Currently ScaleLossGradOp only uses device_count as the scale
    // factor. So it does not depend on any other operators.
    // VarHandle *loss = GetVarHandle(loss_var_name, place);
    // loss->pending_ops_.emplace_back(op_handle);
    // op_handle->inputs_.emplace_back(loss);

    CreateOpOutput(result, op_handle,
                   result->CreateVarNode(out_var_node->Var()), places_[i], i);
  }
}

void MultiDevSSAGraphBuilderBase::CreateComputationalOps(
    ir::Graph *result, ir::Node *node, size_t num_places) const {
  for (size_t scope_idx = 0; scope_idx < num_places; ++scope_idx) {
    auto p = places_[scope_idx];
    auto s = local_scopes_[scope_idx];
    result->Get<GraphOps>(kGraphOps).emplace_back(new ComputationOpHandle(
        result->CreateOpNode(node->Op()), s, p, scope_idx));
    CreateOpHandleIOs(result, node, scope_idx);
  }
}

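// Reduces the latest versions of `og` from all places onto `dst_dev_id` and
// returns the VarHandle of the reduced result.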
VarHandle *MultiDevSSAGraphBuilderBase::CreateReduceOp(ir::Graph *result,
                                                       const std::string &og,
                                                       int dst_dev_id) const {
#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32)
  result->Get<GraphOps>(kGraphOps).emplace_back(new ReduceOpHandle(
      result->CreateEmptyNode("reduce", ir::Node::Type::kOperation),
      local_scopes_, places_, nccl_ctxs_));
#else
  result->Get<GraphOps>(kGraphOps).emplace_back(new ReduceOpHandle(
      result->CreateEmptyNode("reduce", ir::Node::Type::kOperation),
      local_scopes_, places_));
#endif
  auto *op_handle = result->Get<GraphOps>(kGraphOps).back();

  for (size_t i = 0; i < places_.size(); ++i) {
    auto &p = places_[i];
    SetCommunicationContext(op_handle, p);
    auto &vars = result->Get<GraphVars>(kGraphVars)[i][og];
    PADDLE_ENFORCE(!vars.empty());
    auto &prev_grad = vars.back();
    op_handle->AddInput(prev_grad);
  }
  auto &vars = result->Get<GraphVars>(kGraphVars)[dst_dev_id][og];
  auto var =
      new VarHandle(result->CreateEmptyNode(og, ir::Node::Type::kVariable),
                    vars.size(), dst_dev_id, og, places_[dst_dev_id]);
  vars.emplace_back(var);
  op_handle->AddOutput(var);
  return var;
}

bool MultiDevSSAGraphBuilderBase::IsScaleLossOp(ir::Node *node) const {
  return !loss_var_name_.empty() && node->Op() &&
         boost::get<int>(
             node->Op()->GetAttr(OpProtoAndCheckerMaker::OpRoleAttrName())) ==
             (static_cast<int>(OpRole::kBackward) |
              static_cast<int>(OpRole::kLoss));
}

bool MultiDevSSAGraphBuilderBase::IsSparseGradient(
    const std::string &og) const {
  PADDLE_ENFORCE(all_vars_.count(og) != 0);
  return all_vars_.at(og)->GetType() == proto::VarType::SELECTED_ROWS;
}

void AllReduceSSAGraphBuilder::InsertCollectiveOp(
    ir::Graph *result, const std::string &p_name,
    const std::string &g_name) const {
  if (IsSparseGradient(g_name)) {
    CreateReduceOp(result, g_name, 0);
    CreateBroadcastOp(result, g_name, 0);
  } else {
    CreateAllReduceOp(result, g_name);
  }
}

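// Looks up which device a variable has been sharded to; gradient names that
// end with kNewGradSuffix fall back to the base variable name. Returns -1 if
// the variable has not been assigned to any device.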
int BalanceVarSSAGraphBuilder::GetVarDeviceID(
    const std::string &varname) const {
  auto got = sharded_var_device_.find(varname);
  if (got == sharded_var_device_.end()) {
    auto pos = varname.find(framework::kNewGradSuffix);
    if (pos != std::string::npos) {
      got = sharded_var_device_.find(varname.substr(0, pos));
    }
  }
  return got == sharded_var_device_.end() ? -1 : got->second;
}

int BalanceVarSSAGraphBuilder::GetOpDeviceID(ir::Node *node) const {
  if (strategy_.reduce_ != BuildStrategy::ReduceStrategy::kReduce) {
    return -1;
  }
  if (!OpHaveRole(*node, framework::OpRole::kOptimize)) {
    return -1;
  }
  auto param_grad = boost::get<std::vector<std::string>>(
      node->Op()->GetAttr(OpProtoAndCheckerMaker::OpRoleVarAttrName()));

  PADDLE_ENFORCE_EQ(param_grad.size(), 2U);
  int dev_id = GetVarDeviceID(param_grad[1]);
  PADDLE_ENFORCE_NE(dev_id, -1, "dev_id should not be -1.[%s, %s, %s]",
                    node->Op()->Type(), param_grad[0], param_grad[1]);
  return dev_id;
}

size_t BalanceVarSSAGraphBuilder::GetAppropriateDeviceID(
    const std::vector<std::string> &var_names) const {
  int64_t numel_sum = 0;
  for (auto var_name : var_names) {
    if (all_vars_.find(var_name) == all_vars_.end()) continue;
    auto var_desc = all_vars_.at(var_name);
    PADDLE_ENFORCE_NOT_NULL(var_desc);
    auto dim = framework::make_ddim(var_desc->GetShape());
    int64_t numel = framework::product(dim);
    PADDLE_ENFORCE_GT(numel, 0);
    numel_sum += numel;
  }

  auto smallest =
      std::min_element(std::begin(balance_vars_), std::end(balance_vars_));
  size_t dev_id =
      static_cast<size_t>(std::distance(std::begin(balance_vars_), smallest));
  balance_vars_[dev_id] += numel_sum;
  return dev_id;
}

void BalanceVarSSAGraphBuilder::ResetState() const {
  balance_vars_.clear();
  sharded_var_device_.clear();

  balance_vars_.resize(places_.size(), 0);
}

void ReduceSSAGraphBuilder::Init() const {
  MultiDevSSAGraphBuilderBase::Init();
  ResetState();
}

void ReduceSSAGraphBuilder::ResetState() const {
  BalanceVarSSAGraphBuilder::ResetState();
  bcast_var_name_set_.clear();
  bcast_var_name_set_.resize(places_.size());
}

void ReduceSSAGraphBuilder::InsertCollectiveOp(
    ir::Graph *result, const std::string &p_name,
    const std::string &g_name) const {
  size_t cur_device_id = GetAppropriateDeviceID({g_name});
  CreateReduceOp(result, g_name, cur_device_id);
  sharded_var_device_.emplace(g_name, cur_device_id);
  bcast_var_name_set_[cur_device_id].emplace(p_name);
}

bool ReduceSSAGraphBuilder::DealWithSpecialOp(ir::Graph *result,
                                              ir::Node *node) const {
  int op_dev_id = BalanceVarSSAGraphBuilder::GetOpDeviceID(node);
  if (op_dev_id != -1) {
    // This op only runs on one specific device.
    CreateComputationalOp(result, node, op_dev_id);
    for (ir::Node *n : node->outputs) {
      sharded_var_device_.emplace(n->Name(), op_dev_id);
    }
    return true;
  }
  return false;
}

void ReduceSSAGraphBuilder::InsertPostprocessOps(ir::Graph *result) const {
  if (UseGPU()) {
    if (strategy_.fuse_broadcast_op_) {
      CreateFusedBroadcastOp(result, bcast_var_name_set_);
    } else {
      for (size_t dev_id = 0; dev_id < bcast_var_name_set_.size(); ++dev_id) {
        auto &to_bcast_set = bcast_var_name_set_[dev_id];
        for (auto &bcast_name : to_bcast_set) {
          CreateBroadcastOp(result, bcast_name, dev_id);
        }
      }
    }
  }
}

int ReduceSSAGraphBuilder::GetOpDeviceID(
    ir::Node *node,
    std::unordered_map<std::string, std::vector<ir::Node *>> *delay_ops) const {
  if (!OpHaveRole(*node, framework::OpRole::kOptimize)) {
    return -1;
  }

  auto param_grad = boost::get<std::vector<std::string>>(
      node->Op()->GetAttr(OpProtoAndCheckerMaker::OpRoleVarAttrName()));

  PADDLE_ENFORCE_EQ(param_grad.size(), 2U);
  int dev_id = GetVarDeviceID(param_grad[1]);

  if (dev_id == -1) {
    (*delay_ops)[param_grad[1]].push_back(node);
    return -2;
  }
  return dev_id;
}

std::vector<ir::Node *> ReduceSSAGraphBuilder::SortOperations(
    const ir::Graph &graph) const {
  std::vector<ir::Node *> sorted_ops = ir::TopologySortOperations(graph);
  return SortForReduceMode(sorted_ops);
}

std::vector<ir::Node *> ReduceSSAGraphBuilder::SortForReduceMode(
    const std::vector<ir::Node *> &topo_ops) const {
  std::vector<ir::Node *> sorted_ops;
  std::unordered_map<std::string, std::vector<ir::Node *>> delayed_op;
  sorted_ops.reserve(topo_ops.size());
  ResetState();

  auto insert_delayed_op = [&](const std::string &var_name, int dev_id) {
    sharded_var_device_.emplace(var_name, dev_id);
    if (delayed_op.count(var_name)) {
      auto &ops = delayed_op.at(var_name);
      sorted_ops.insert(sorted_ops.end(), ops.begin(), ops.end());
      delayed_op.at(var_name).clear();
    }
  };

  for (ir::Node *node : topo_ops) {
    int op_dev_id = GetOpDeviceID(node, &delayed_op);
    if (op_dev_id > -1) {
      // This op only runs on one specific device.
      sorted_ops.emplace_back(node);
      for (ir::Node *n : node->outputs) {
        insert_delayed_op(n->Name(), op_dev_id);
      }
    } else if (op_dev_id == -1) {
      // This op runs on all devices, and its output may have parameter's
      // gradients.
      sorted_ops.emplace_back(node);
      bool is_bk_op =
          static_cast<bool>(boost::get<int>(node->Op()->GetAttr(
                                OpProtoAndCheckerMaker::OpRoleAttrName())) &
                            static_cast<int>(OpRole::kBackward));
      if (!is_bk_op) continue;
      // Currently, we assume that once a gradient is generated, it can be
      // broadcast, and that each gradient is broadcast only once.
      std::vector<std::string> backward_vars;
      try {
        backward_vars =
            boost::get<std::vector<std::string>>(node->Op()->GetNullableAttr(
                OpProtoAndCheckerMaker::OpRoleVarAttrName()));
      } catch (boost::bad_get e) {
      }
      PADDLE_ENFORCE_EQ(backward_vars.size() % 2, 0);

      for (size_t i = 0; i < backward_vars.size(); i += 2) {
        auto &g_name = backward_vars[i + 1];
        size_t cur_device_id = GetAppropriateDeviceID({g_name});
        insert_delayed_op(g_name, static_cast<int>(cur_device_id));
      }
    } else if (op_dev_id == -2) {
      // The Op on which the Op depends has not yet been generated.
    }
  }

  PADDLE_ENFORCE_EQ(sorted_ops.size(), topo_ops.size());

  ResetState();
  return sorted_ops;
}

void DistSSAGraphBuilder::Init() const {
  MultiDevSSAGraphBuilderBase::Init();
  ResetState();
}

void DistSSAGraphBuilder::ResetState() const {
  BalanceVarSSAGraphBuilder::ResetState();
  bcast_var_name_set_.clear();
  bcast_var_name_set_.resize(places_.size());
}

bool DistSSAGraphBuilder::DealWithSpecialOp(ir::Graph *result,
                                            ir::Node *node) const {
  bool insert_op = false;
  if (OpHaveRole(*node, OpRole::kRPC)) {
    int op_dev_id = CreateRPCOp(result, node);
    PADDLE_ENFORCE(op_dev_id != -1,
                   "Can not schedule the RPC operator to the right place.");
    if (node->Op()->Type() == "recv") {
      auto recv_vars_attr =
          boost::get<std::vector<std::string>>(node->Op()->GetNullableAttr(
              OpProtoAndCheckerMaker::OpRoleVarAttrName()));
      PADDLE_ENFORCE(recv_vars_attr.size() == 2UL);  // [parameter, gradient]
      if (recv_vars_attr[0].find(".block") == std::string::npos) {
        bcast_var_name_set_[op_dev_id].emplace(recv_vars_attr[0]);
      }
    }
    insert_op = true;
    need_broadcast_var_ = true;
  } else if (OpHaveRole(*node, OpRole::kDist)) {
    int op_dev_id = CreateDistTrainOp(result, node);
    if (node->Op()->Type() == "concat") {
      auto origin_param_name = node->Op()->OutputArgumentNames()[0];
      bcast_var_name_set_[op_dev_id].emplace(origin_param_name);
    }
    insert_op = true;
  } else {
    int op_dev_id = GetOpDeviceID(node);
    if (op_dev_id != -1) {  // This op only runs on one specific device.
      CreateComputationalOp(result, node, op_dev_id);
      for (ir::Node *n : node->outputs) {
        sharded_var_device_.emplace(n->Name(), op_dev_id);
      }
      insert_op = true;
    }
  }
  return insert_op;
}

void SetOpInputsAllPlaces(ir::Graph *result, ir::Node *node, int num_places) {
  auto *op_handle = result->Get<GraphOps>(kGraphOps).back();
  for (ir::Node *input : node->inputs) {
    VarHandle *var = nullptr;
    for (int place_offset = 0; place_offset < num_places; ++place_offset) {
      auto &var_holders = result->Get<GraphVars>(kGraphVars)[place_offset];
      auto &var_holder = var_holders[input->Name()];
      if (!var_holder.empty()) {
        var = *var_holder.rbegin();
        op_handle->AddInput(var);
      }
    }
  }
}

// Create RPC-related op handles that connect their in ops and out ops.
int DistSSAGraphBuilder::CreateRPCOp(ir::Graph *result, ir::Node *node) const {
  int op_dev_id = -1;
  if (node->Op()->Type() == "send") {
    // TODO(paddle-dev): getting the first var is not safe.
    op_dev_id = GetVarDeviceID(node->inputs[0]->Name());
    PADDLE_ENFORCE(!ir::IsControlDepVar(*node->inputs[0]),
                   "This hack no longer holds, please fix.");
    // A variable name that contains ".block" means it was split by the
    // split_byref op.
    if (strategy_.reduce_ == BuildStrategy::ReduceStrategy::kAllReduce &&
        node->inputs[0]->Name().find(".block") == std::string::npos) {
      std::vector<std::string> input_var_names;
      for (ir::Node *n : node->inputs) {
        input_var_names.push_back(n->Name());
      }
      auto send_param_grad = boost::get<std::vector<std::string>>(
          node->Op()->GetAttr(OpProtoAndCheckerMaker::OpRoleVarAttrName()));
      PADDLE_ENFORCE_EQ(send_param_grad.size(), 2U);
      op_dev_id = GetAppropriateDeviceID({send_param_grad[1]});
      VLOG(10) << "send grad " << input_var_names[0] << " origin "
               << send_param_grad[1] << " place: " << op_dev_id;
      for (auto &varname : input_var_names) {
        sharded_var_device_.emplace(varname, op_dev_id);
      }
      sharded_var_device_.emplace(send_param_grad[1], op_dev_id);
    }
  } else if (node->Op()->Type() == "recv") {
    std::vector<std::string> output_var_names;
    for (ir::Node *n : node->outputs) {
      output_var_names.push_back(n->Name());
    }
    auto recv_param_grad = boost::get<std::vector<std::string>>(
        node->Op()->GetAttr(OpProtoAndCheckerMaker::OpRoleVarAttrName()));
    if (recv_param_grad.size() == 2U) {
      op_dev_id = GetVarDeviceID(recv_param_grad[1]);
      VLOG(10) << "recv param " << recv_param_grad[0]
               << " get grad place: " << recv_param_grad[1]
               << " place: " << op_dev_id;
    } else {
      op_dev_id = GetAppropriateDeviceID(output_var_names);
    }
    for (auto &varname : output_var_names) {
      sharded_var_device_.emplace(varname, op_dev_id);
    }
  } else {
    // send_barrier, fetch_barrier will run on place 0;
    op_dev_id = 0;
  }

  PADDLE_ENFORCE(op_dev_id != -1, "can not find the right place for rpc op: %s",
                 node->Op()->Type());

  // Create fetch_barrier op handle to enable output on all devices.
  // **NOTE** fetch_barrier should output variables list same as recv op does.
  if (node->Op()->Type() == "fetch_barrier") {
    result->Get<GraphOps>(kGraphOps).emplace_back(new FetchBarrierOpHandle(
        result->CreateOpNode(node->Op()), local_scopes_, places_));
  } else {
    result->Get<GraphOps>(kGraphOps).emplace_back(new RPCOpHandle(
        result->CreateOpNode(node->Op()), *node->Op(), local_scopes_[op_dev_id],
        node->Op()->Type(), places_[op_dev_id]));
  }

  if (node->Op()->Type() == "send") {
    CreateOpHandleIOs(result, node, op_dev_id);
  } else {
    // send_barrier, recv and fetch_barrier's inputs are dependency vars;
    // gather them from all places.
    auto p = places_[op_dev_id];
    auto *op_handle = result->Get<GraphOps>(kGraphOps).back();
    op_handle->SetDeviceContext(p,
                                platform::DeviceContextPool::Instance().Get(p));

    SetOpInputsAllPlaces(result, node, places_.size());
    for (ir::Node *output : node->outputs) {
      int outvar_dev_id = op_dev_id;
      if (node->Op()->Type() == "fetch_barrier") {
        outvar_dev_id = GetVarDeviceID(output->Name());
        PADDLE_ENFORCE_NE(outvar_dev_id, -1, "output name %s", output->Name());
      }
      p = places_[outvar_dev_id];
      ir::Node *new_node = nullptr;
      if (output->Var()) {
        new_node = result->CreateVarNode(output->Var());
      } else {
        new_node =
            result->CreateEmptyNode(output->Name(), ir::Node::Type::kVariable);
      }
      CreateOpOutput(result, op_handle, new_node, p, outvar_dev_id);
    }
  }
  return op_dev_id;
}

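// Schedules distributed-training helper ops (split_byref/split_selected_rows/
// split_ids and concat) onto a single device and records the variables they
// produce in sharded_var_device_.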
int DistSSAGraphBuilder::CreateDistTrainOp(ir::Graph *result,
                                           ir::Node *node) const {
  int op_dev_id = -1;
  std::vector<std::string> input_var_names;
  std::vector<std::string> output_var_names;
  for (ir::Node *input : node->inputs) {
    input_var_names.push_back(input->Name());
  }
  for (ir::Node *output : node->outputs) {
    output_var_names.push_back(output->Name());
  }

  if (node->Op()->Type() == "split_byref" ||
      node->Op()->Type() == "split_selected_rows" ||
      node->Op()->Type() == "split_ids") {
    // TODO(paddle-dev): getting the first var is not safe.
    op_dev_id = GetVarDeviceID(input_var_names[0]);
    if (strategy_.reduce_ == BuildStrategy::ReduceStrategy::kAllReduce) {
      op_dev_id = GetAppropriateDeviceID(input_var_names);
      for (auto &varname : input_var_names) {
        sharded_var_device_.emplace(varname, op_dev_id);
      }
    }
    for (auto &varname : output_var_names) {
      sharded_var_device_.emplace(varname, op_dev_id);
    }
  } else if (node->Op()->Type() == "concat") {
    op_dev_id = GetVarDeviceID(input_var_names[0]);
    for (auto &varname : output_var_names) {
      sharded_var_device_.emplace(varname, op_dev_id);
    }
  } else {
    LOG(ERROR) << "got unexpected dist op: " << node->Op()->Type();
    PADDLE_THROW(
        "the distribute training related op should be in [split_byref, "
        "concat].");
  }

  PADDLE_ENFORCE(op_dev_id != -1,
                 "can not find right place for distributed op: %s",
                 node->Op()->Type());

  CreateComputationalOp(result, node, op_dev_id);
  return op_dev_id;
}

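// A parameter is treated as DGC-encoded when a companion "<p_name>__dgc_u__"
// variable exists in the program.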
bool DistSSAGraphBuilder::IsEncoded(const std::string &p_name) const {
  auto u_name = p_name + "__dgc_u__";
  auto it = all_vars_.find(u_name);
  if (it == all_vars_.end()) {
    VLOG(10) << "can't find u_name, so it's not encoded:" << u_name;
    return false;
  }

  return true;
}

void DistSSAGraphBuilder::InsertCollectiveOp(ir::Graph *result,
                                             const std::string &p_name,
                                             const std::string &g_name) const {
  size_t cur_device_id = 0;
  switch (strategy_.reduce_) {
    case BuildStrategy::ReduceStrategy::kReduce:
      cur_device_id = GetAppropriateDeviceID({g_name});
      CreateReduceOp(result, g_name, cur_device_id);
      sharded_var_device_.emplace(g_name, cur_device_id);
      break;
    case BuildStrategy::ReduceStrategy::kAllReduce:
      if (IsSparseGradient(g_name)) {
        CreateReduceOp(result, g_name, 0);
        CreateBroadcastOp(result, g_name, 0);
      } else {
#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32)
        CreateAllReduceOp(result, g_name, IsEncoded(p_name));
#else
        PADDLE_ENFORCE(false, "Compiled without CUDA!");
#endif
      }
      break;
    default:
      LOG(FATAL) << "Unknown reduce strategy.";
      break;
  }
}

void DistSSAGraphBuilder::InsertPostprocessOps(ir::Graph *result) const {
  // Broadcast received parameters when training in parameter server mode.
  if (need_broadcast_var_) {
    // There are 4 conditions:
    // 1. GPU && Reduce: Reduce gradient then broadcast gradient to other GPUs.
    // Need to broadcast received parameters to other GPUs.
    // 2. GPU && AllReduce: AllReduce all gradients to each GPU. Need to
    // broadcast received parameters to other GPUs.
    // 3. CPU && AllReduce: AllReduce all gradients to each thread. Need to
    // broadcast received parameters to other scopes.
    // 4. CPU && Reduce: because all parameters share the same memory, there is
    // no need to broadcast received parameters.
    if (!UseGPU() &&
        strategy_.reduce_ == BuildStrategy::ReduceStrategy::kReduce) {
      return;
    }
    if (strategy_.fuse_broadcast_op_) {
      CreateFusedBroadcastOp(result, bcast_var_name_set_);
    } else {
      for (size_t dev_id = 0; dev_id < bcast_var_name_set_.size(); ++dev_id) {
        auto &to_bcast_set = bcast_var_name_set_[dev_id];
        for (auto &bcast_name : to_bcast_set) {
          CreateBroadcastOp(result, bcast_name, dev_id);
        }
      }
    }
  }
}

std::unordered_set<std::string> &MultiDevSSAGraphBuilder() {
  static std::unordered_set<std::string> regs;
  return regs;
}

static int MultiDevSSAGraphBuilderRegister(const std::string &builder_mode) {
  MultiDevSSAGraphBuilder().insert(builder_mode);
  return 0;
}

}  // namespace details
}  // namespace framework
}  // namespace paddle

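// Registers a multi-devices pass: records its name in the registry above and
// declares the graph attributes every builder requires (loss var name,
// places, local scopes, build strategy, nranks).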
#define REGISTER_MULTI_DEVICES_PASS(pass_name, pass_class)                     \
  STATIC_ASSERT_GLOBAL_NAMESPACE(                                              \
      _reg_ssa_graph_builder_##pass_name,                                      \
      "REGISTER_MULTI_DEVICES_PASS must be called in global namespace.");      \
  int _reg_ssa_graph_builder_entry_##pass_name =                               \
      paddle::framework::details::MultiDevSSAGraphBuilderRegister(#pass_name); \
  REGISTER_PASS(pass_name, pass_class)                                         \
      .RequirePassAttr(paddle::framework::details::kLossVarName)               \
      .RequirePassAttr(paddle::framework::details::kPlaces)                    \
      .RequirePassAttr(paddle::framework::details::kLocalScopes)               \
      .RequirePassAttr(paddle::framework::details::kStrategy)                  \
      .RequirePassAttr(paddle::framework::details::kNRanks)

REGISTER_MULTI_DEVICES_PASS(reduce_mode_multi_devices_pass,
                            paddle::framework::details::ReduceSSAGraphBuilder);
REGISTER_MULTI_DEVICES_PASS(
    all_reduce_mode_multi_devices_pass,
    paddle::framework::details::AllReduceSSAGraphBuilder);
REGISTER_MULTI_DEVICES_PASS(dist_multi_devices_pass,
                            paddle::framework::details::DistSSAGraphBuilder);