//   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/fluid/framework/details/multi_devices_graph_builder.h"
#include <fstream>
#include <utility>
#include "paddle/fluid/framework/details/broadcast_op_handle.h"
#include "paddle/fluid/framework/details/computation_op_handle.h"
#include "paddle/fluid/framework/details/reduce_op_handle.h"
#include "paddle/fluid/framework/details/rpc_op_handle.h"
#include "paddle/fluid/framework/details/scale_loss_grad_op_handle.h"
#include "paddle/fluid/framework/op_info.h"
#include "paddle/fluid/framework/scope.h"

#ifdef PADDLE_WITH_CUDA
#include "paddle/fluid/framework/details/nccl_all_reduce_op_handle.h"
#endif

#include <string>
#include <vector>

namespace paddle {
namespace framework {
namespace details {

#ifdef PADDLE_WITH_CUDA
MultiDevSSAGraphBuilder::MultiDevSSAGraphBuilder(
    const std::vector<platform::Place> &places,
    const std::string &loss_var_name,
    const std::unordered_set<std::string> &params,
    const std::vector<Scope *> &local_scopes,
    platform::NCCLContextMap *nccl_ctxs, const BuildStrategy &strategy)
    : loss_var_name_(loss_var_name),
      places_(places),
      local_scopes_(local_scopes),
      nccl_ctxs_(nccl_ctxs),
      strategy_(strategy) {
#else
MultiDevSSAGraphBuilder::MultiDevSSAGraphBuilder(
    const std::vector<platform::Place> &places,
    const std::string &loss_var_name,
    const std::unordered_set<std::string> &params,
    const std::vector<Scope *> &local_scopes, const BuildStrategy &strategy)
    : loss_var_name_(loss_var_name),
      places_(places),
      local_scopes_(local_scopes),
      strategy_(strategy) {
#endif
  for (auto &p : params) {
    grad_names_.insert(GradVarName(p));
  }
}

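// Creates the input/output variable handles for the op handle most recently
// appended to `result` and binds it to the device context of
// `places_[place_id]`.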
void MultiDevSSAGraphBuilder::CreateOpHandleIOs(SSAGraph *result,
                                                const OpDesc &op,
                                                size_t place_id) const {
  auto p = places_[place_id];
  auto *op_handle = result->ops_.back().get();
  op_handle->SetDeviceContext(p,
                              platform::DeviceContextPool::Instance().Get(p));

  for (auto &each_var_name : op.InputArgumentNames()) {
    VarHandle *var =
        CreateOrGetLatestVarHandle(result, each_var_name, p, place_id);
    op_handle->AddInput(var);
  }

  for (auto &each_var_name : op.OutputArgumentNames()) {
    CreateOpOutput(result, op_handle, each_var_name, p, place_id);
  }
}

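// Returns the input variable names of all `send_vars` ops in block 0.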
std::vector<std::string> MultiDevSSAGraphBuilder::FindDistTrainSendVars(
    const ProgramDesc &program) const {
  std::vector<std::string> send_vars;
  // since parameters are all in block 0,
  // it's enough to only scan send ops in block 0
  for (auto *op : program.Block(0).AllOps()) {
    // TODO(Yancey1989): use a graceful method to find send op,
    // instead of the hard-coded string
    if (op->Type() == "send_vars") {
      auto op_vars = op->InputArgumentNames();
      send_vars.reserve(send_vars.size() +
                        std::distance(op_vars.begin(), op_vars.end()));
      send_vars.insert(send_vars.end(), op_vars.begin(), op_vars.end());
    }
  }
  return send_vars;
}

std::vector<std::string> MultiDevSSAGraphBuilder::FindDistTrainRecvVars(
    const ProgramDesc &program) const {
  std::vector<std::string> recv_vars;
  for (auto *op : program.Block(0).AllOps()) {
    // TODO(Yancey1989): use a graceful method to find recv op,
    // instead of the hard-coded string
    if (op->Type() == "recv") {
      auto op_vars = op->OutputArgumentNames();
      recv_vars.reserve(recv_vars.size() +
                        std::distance(op_vars.begin(), op_vars.end()));
      recv_vars.insert(recv_vars.end(), op_vars.begin(), op_vars.end());
    }
  }
  return recv_vars;
}

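// An op is treated as part of distributed training when one of its outputs is
// a send variable or one of its inputs is a recv variable; only split
// variables (names carrying the `.block` suffix) are considered.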
bool MultiDevSSAGraphBuilder::IsDistTrainOp(
    const OpDesc &op, const std::vector<std::string> &send_vars,
    const std::vector<std::string> &recv_vars) const {
  if (send_vars.size() == 0 || recv_vars.size() == 0) {
    return false;
  }

  /**
   * Check whether any variable in opvars has the `.block` suffix and
   * also appears in rpc_vars.
   */
  auto checker = [](const std::vector<std::string> &opvars,
                    const std::vector<std::string> &rpc_vars) -> bool {
    for (auto &var : opvars) {
      // a variable name with the suffix `.block` means it is a variable
      // split by DistributeTranspiler
      // [python/paddle/fluid/transpiler/distribute_transpiler.py]
      if (var.find(".block") != std::string::npos &&
          std::find(rpc_vars.begin(), rpc_vars.end(), var) != rpc_vars.end()) {
        return true;
      }
    }
    return false;
  };

  return checker(op.OutputArgumentNames(), send_vars) ||
         checker(op.InputArgumentNames(), recv_vars);
}

bool MultiDevSSAGraphBuilder::IsRPCOp(const OpDesc &op) const {
  for (auto &name : op.OutputNames()) {
    if (name == "RPCClient") {
      return true;
    }
  }
  return false;
}

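// Builds the multi-device SSA graph from the program: RPC and other
// distributed-training ops are pinned to place 0, a scale-loss-grad op is
// inserted at the forward/backward boundary, computational ops are either
// replicated on every place or assigned by GetOpDeviceID() under the kReduce
// strategy, and parameter gradients are synchronized via reduce/broadcast or
// NCCL all-reduce according to the build strategy.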
std::unique_ptr<SSAGraph> MultiDevSSAGraphBuilder::Build(
    const ProgramDesc &program) const {
  std::unordered_map<std::string, proto::VarType::Type> var_types;
  for (auto *var : program.Block(0).AllVars()) {
    var_types[var->Name()] = var->GetType();
  }

  auto graph = new SSAGraph();
  SSAGraph &result = *graph;
  std::unordered_set<std::string> og_has_been_broadcast;

  // We cannot invoke resize() here; doing so triggers a bug in GCC 4.8.
  result.vars_ = std::vector<
      std::unordered_map<std::string, std::vector<std::unique_ptr<VarHandle>>>>(
      places_.size());

  // find send/recv vars so that we can place the distributed training
  // related ops on place 0
  auto send_vars = FindDistTrainSendVars(program);
  auto recv_vars = FindDistTrainRecvVars(program);

  size_t cur_device_id = 0;
  std::vector<std::unordered_set<std::string>> var_name_on_devices;
  std::vector<std::unordered_set<std::string>> bcast_var_name_set;
  var_name_on_devices.resize(places_.size());
  bcast_var_name_set.resize(places_.size());

  bool is_forwarding = true;
  for (auto *op : program.Block(0).AllOps()) {
    if (IsRPCOp(*op)) {
      // append rpc ops if the program is a distributed trainer's main program.
      // always use the first device
      CreateRPCOp(&result, *op);
    } else if (IsDistTrainOp(*op, send_vars, recv_vars)) {
      CreateDistTrainOp(&result, *op);
    } else if (IsScaleLossOp(*op)) {
      // the user can customize loss@grad when gradient_scale_ is kCustomized
      if (strategy_.gradient_scale_ !=
          BuildStrategy::GradientScaleStrategy::kCustomized) {
        CreateScaleLossGradOp(&result);
      }
      is_forwarding = false;
    } else {
      int op_dev_id = GetOpDeviceID(var_name_on_devices, *op);
      if (op_dev_id == -1) {  // var on all devices
        CreateComputationalOps(&result, *op, places_.size());
      } else {
        CreateComputationalOp(&result, *op, op_dev_id);
        for (auto &var_name : op->OutputArgumentNames()) {
          var_name_on_devices[op_dev_id].emplace(var_name);
        }
      }
      if (!is_forwarding && places_.size() > 1) {
        // Currently, we assume that once a gradient is generated, it can be
        // broadcast, and each gradient is only broadcast once.
        if (static_cast<bool>(boost::get<int>(op->GetAttr(
                                  OpProtoAndCheckerMaker::OpRoleAttrName())) &
                              static_cast<int>(OpRole::kBackward))) {
          try {
            auto backward_vars =
                boost::get<std::vector<std::string>>(op->GetNullableAttr(
                    OpProtoAndCheckerMaker::OpRoleVarAttrName()));

            PADDLE_ENFORCE_EQ(backward_vars.size() % 2, 0);

            for (size_t i = 0; i < backward_vars.size(); i += 2) {
              auto &p_name = backward_vars[i];
              auto &g_name = backward_vars[i + 1];
              VLOG(10) << "Bcast " << g_name << " for parameter " << p_name;

              switch (strategy_.reduce_) {
                case BuildStrategy::ReduceStrategy::kReduce:
                  CreateReduceOp(&result, g_name, cur_device_id);
                  var_name_on_devices[cur_device_id].emplace(g_name);
                  bcast_var_name_set[cur_device_id].emplace(p_name);
                  cur_device_id = (cur_device_id + 1) % places_.size();
                  break;
                case BuildStrategy::ReduceStrategy::kAllReduce:
                  if (IsSparseGradient(var_types, g_name)) {
                    CreateReduceOp(&result, g_name, 0);
                    CreateBroadcastOp(&result, g_name, 0);
                  } else {
                    InsertNCCLAllReduceOp(&result, g_name);
                  }
                  break;
              }
            }
          } catch (const boost::bad_get &) {
            // The op has no OpRoleVar attribute; there is nothing to reduce
            // or broadcast for it.
          }
        }
      }
    }
  }

  // Insert BCast Ops
  for (size_t dev_id = 0; dev_id < bcast_var_name_set.size(); ++dev_id) {
    auto &to_bcast_set = bcast_var_name_set[dev_id];
    for (auto &bcast_name : to_bcast_set) {
      CreateBroadcastOp(&result, bcast_name, dev_id);
    }
  }
  /*
    Dependency graph has been constructed. However, there are still data
    hazards that need to be handled.
   */
  PolishGraphToSupportDataHazards(&result);

  /*
   * Only variables should be the leaves of the graph.
   */
  AddOutputToLeafOps(&result);

  if (VLOG_IS_ON(10)) {
    std::ofstream fout("/tmp/graph.dot");
    PrintGraphviz(*graph, fout);
  }

  return std::unique_ptr<SSAGraph>(graph);
}

bool MultiDevSSAGraphBuilder::IsSparseGradient(
    const std::unordered_map<std::string, proto::VarType::Type> &var_types,
    const std::string &og) const {
  PADDLE_ENFORCE(var_types.count(og) != 0);
  if (var_types.at(og) == proto::VarType::SELECTED_ROWS) {
    return true;
  }
  return false;
}

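// Broadcasts the latest version of variable `p_name` from `src_dev_id` to all
// places, appending a new VarHandle version on each destination place.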
void MultiDevSSAGraphBuilder::CreateBroadcastOp(SSAGraph *result,
                                                const std::string &p_name,
                                                size_t src_dev_id) const {
#ifdef PADDLE_WITH_CUDA
  auto *op_handle = new BroadcastOpHandle(local_scopes_, places_, nccl_ctxs_);
#else
  auto *op_handle = new BroadcastOpHandle(local_scopes_, places_);
#endif

  result->ops_.emplace_back(op_handle);
  auto *in = result->vars_.at(src_dev_id).at(p_name).back().get();
  op_handle->AddInput(in);

  for (size_t i = 0; i < places_.size(); ++i) {
    auto &vars = result->vars_.at(i).at(p_name);
    auto &p = places_[i];
    auto *out_var = new VarHandle(vars.size(), i, p_name, p);
    vars.emplace_back(out_var);
    op_handle->AddOutput(out_var);
#ifndef PADDLE_WITH_CUDA
    op_handle->SetDeviceContext(p,
                                platform::DeviceContextPool::Instance().Get(p));
#endif
  }
}

void MultiDevSSAGraphBuilder::CreateComputationalOp(SSAGraph *result,
                                                    const OpDesc &op,
                                                    int dev_id) const {
  result->ops_.emplace_back(
      new ComputationOpHandle(op, local_scopes_[dev_id], places_[dev_id]));
  CreateOpHandleIOs(result, op, dev_id);
}

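// All-reduces the gradient `og` across all places with NCCL: each place
// contributes its latest version of the gradient and receives the aggregated
// result as a new VarHandle version.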
void MultiDevSSAGraphBuilder::InsertNCCLAllReduceOp(
    SSAGraph *result, const std::string &og) const {
#ifdef PADDLE_WITH_CUDA
  result->ops_.emplace_back(
      new NCCLAllReduceOpHandle(local_scopes_, places_, *nccl_ctxs_));
  auto *op_handle = result->ops_.back().get();

  for (size_t i = 0; i < places_.size(); ++i) {
    auto &p = places_[i];
    auto &vars = result->vars_[i][og];
    PADDLE_ENFORCE(!vars.empty());
    auto &prev_grad = vars.back();
    op_handle->AddInput(prev_grad.get());

    auto var = new VarHandle(vars.size() - 1, i, og, p);
    vars.emplace_back(var);
    op_handle->AddOutput(var);
  }
#else
  PADDLE_THROW("Not implemented");
#endif
}

bool MultiDevSSAGraphBuilder::IsParameterGradientOnce(
    const std::string &og,
    std::unordered_set<std::string> *og_has_been_broadcast) const {
  bool is_pg_once =
      grad_names_.count(og) != 0 && og_has_been_broadcast->count(og) == 0;
  if (is_pg_once) {
    // Insert NCCL AllReduce Op
    og_has_been_broadcast->insert(og);
  }
  return is_pg_once;
}

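// Under the kReduce strategy, returns the device that already holds one of the
// op's input variables so that the op can run next to its data; returns -1
// when the op should be replicated on every device.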
int MultiDevSSAGraphBuilder::GetOpDeviceID(
    const std::vector<std::unordered_set<std::string>> &var_name_on_devices,
    const OpDesc &op) const {
  if (strategy_.reduce_ != BuildStrategy::ReduceStrategy::kReduce) {
    return -1;
  }

  int var_dev_id = -1;
  for (auto &var_name : op.InputArgumentNames()) {
    if (var_dev_id != -1) break;
    for (size_t i = 0; i < var_name_on_devices.size(); ++i) {
      if (var_name_on_devices[i].count(var_name)) {
        var_dev_id = static_cast<int>(i);
        break;
      }
    }
  }
  return var_dev_id;
}

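// Inserts one ScaleLossGradOpHandle per place to generate loss@grad; see the
// FIXME below regarding the scale factor.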
void MultiDevSSAGraphBuilder::CreateScaleLossGradOp(SSAGraph *result) const {
  for (size_t i = 0; i < places_.size(); ++i) {
// Insert ScaleCost OpHandle
#ifdef PADDLE_WITH_CUDA
    auto *communication_dev_ctx = nccl_ctxs_->DevCtx(places_[i]);
#else
    auto *communication_dev_ctx =
        platform::DeviceContextPool::Instance().Get(platform::CPUPlace());
#endif

    auto *op_handle =
        new ScaleLossGradOpHandle(local_scopes_.size(), local_scopes_[i],
                                  places_[i], communication_dev_ctx);
    result->ops_.emplace_back(op_handle);

    // FIXME: Currently ScaleLossGradOp only uses device_count as the scale
    // factor, so it does not depend on any other operators.
    // VarHandle *loss = GetVarHandle(loss_var_name, place);
    // loss->pending_ops_.emplace_back(op_handle);
    // op_handle->inputs_.emplace_back(loss);

    CreateOpOutput(result, op_handle, GradVarName(loss_var_name_), places_[i],
                   i);
  }
}

void MultiDevSSAGraphBuilder::CreateComputationalOps(SSAGraph *result,
                                                     const OpDesc &op,
                                                     size_t num_places) const {
  for (size_t scope_idx = 0; scope_idx < num_places; ++scope_idx) {
    auto p = places_[scope_idx];
    auto s = local_scopes_[scope_idx];
    result->ops_.emplace_back(new ComputationOpHandle(op, s, p));
    CreateOpHandleIOs(result, op, scope_idx);
  }
}

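// Reduces the gradient `og` from all places onto `dst_dev_id` and returns the
// VarHandle holding the reduced result.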
VarHandle *MultiDevSSAGraphBuilder::CreateReduceOp(SSAGraph *result,
                                                   const std::string &og,
                                                   int dst_dev_id) const {
#ifdef PADDLE_WITH_CUDA
  result->ops_.emplace_back(
      new ReduceOpHandle(local_scopes_, places_, nccl_ctxs_));
#else
  result->ops_.emplace_back(new ReduceOpHandle(local_scopes_, places_));
#endif
  auto *op_handle = result->ops_.back().get();

  for (size_t i = 0; i < places_.size(); ++i) {
    auto &vars = result->vars_[i][og];
#ifndef PADDLE_WITH_CUDA
    auto &p = places_[i];
    op_handle->SetDeviceContext(p,
                                platform::DeviceContextPool::Instance().Get(p));
#endif
    PADDLE_ENFORCE(!vars.empty());
    auto &prev_grad = vars.back();
    op_handle->AddInput(prev_grad.get());
  }
  auto &vars = result->vars_[dst_dev_id][og];
  auto var =
      new VarHandle(vars.size() - 1, dst_dev_id, og, places_[dst_dev_id]);
  vars.emplace_back(var);
  op_handle->AddOutput(var);
  return var;
}

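// Makes `op` depend on every existing op named `prev_op_name` by wiring a
// dummy dependency variable between them.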
void MultiDevSSAGraphBuilder::ConnectOp(SSAGraph *result, OpHandleBase *op,
                                        const std::string &prev_op_name) const {
  for (auto &prev_op : result->ops_) {
    if (prev_op->Name() == prev_op_name) {
      auto *dep_var = new DummyVarHandle();
      prev_op->AddOutput(dep_var);
      result->dep_vars_.emplace(dep_var);
      op->AddInput(dep_var);
    }
  }
}

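// Distributed-training ops run on place 0; a `concat` op is additionally
// connected to `fetch_barrier` so that it runs after the parameter blocks have
// been fetched.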
void MultiDevSSAGraphBuilder::CreateDistTrainOp(SSAGraph *result,
                                                const OpDesc &op) const {
  CreateComputationalOp(result, op, 0);
  if (op.Type() == "concat") {
    ConnectOp(result, result->ops_.back().get(), "fetch_barrier");
  }
}

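// Creates an RPCOpHandle on place 0 and chains the rpc ops
// (send_vars -> send_barrier -> recv -> fetch_barrier) through dummy
// dependency variables.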
void MultiDevSSAGraphBuilder::CreateRPCOp(SSAGraph *result,
                                          const OpDesc &op) const {
  auto &p = places_[0];
  auto *s = local_scopes_[0];
  result->ops_.emplace_back(new RPCOpHandle(op, s, p, op.Type()));

  if (op.Type() == "send_barrier") {
    ConnectOp(result, result->ops_.back().get(), "send_vars");
  } else if (op.Type() == "recv") {
    ConnectOp(result, result->ops_.back().get(), "send_barrier");
  } else if (op.Type() == "fetch_barrier") {
    ConnectOp(result, result->ops_.back().get(), "recv");
  } else if (op.Type() == "send_vars") {
    // do nothing
  } else {
    PADDLE_THROW(
        "rpc op should be in ["
        "send_vars, send_barrier, recv, fetch_barrier]");
  }

  // TODO(Yancey1989): scheduling rpc ops on different places may
  // increase throughput
  CreateOpHandleIOs(result, op, 0);
}

bool MultiDevSSAGraphBuilder::IsScaleLossOp(const OpDesc &op) const {
  return boost::get<int>(
             op.GetAttr(OpProtoAndCheckerMaker::OpRoleAttrName())) ==
             (static_cast<int>(OpRole::kBackward) |
              static_cast<int>(OpRole::kLoss)) &&
         !loss_var_name_.empty();  // If loss_var is empty, this is test mode.
}
}  // namespace details
}  // namespace framework
}  // namespace paddle