/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/framework/parallel_executor.h"
#include "ThreadPool.h"
#include "executor.h"
#include "lod_tensor.h"
#include "lod_tensor_array.h"
#include "op_registry.h"
#include "paddle/fluid/operators/math/concat.h"

namespace paddle {
namespace framework {

#ifdef PADDLE_WITH_CUDA

// FIXME: CHECK the return value of x;
#define NCCL_INVOKE(x) x
#endif

struct OpHandle;
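
// The dependency graph is built from two kinds of nodes: variable handles
// (VarHandleBase and its subclasses) and operator handles (OpHandle). A
// variable handle records the op that produced it (generated_op_) and the ops
// that consume it (pending_ops_); an op handle records its input and output
// variable handles and a device context for every place it touches.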

struct VarHandleBase {
  virtual ~VarHandleBase() {}
  virtual std::string DebugString() const = 0;

  OpHandle *generated_op_;
  std::unordered_set<OpHandle *> pending_ops_;
};

struct VarHandle : public VarHandleBase {
  std::string DebugString() const override {
    std::stringstream ss;
    ss << name_ << ":" << place_;
    return ss.str();
  }

  // The version field is currently unused; it is stored only to make
  // debugging easier.
  size_t version_;
  std::string name_;
  platform::Place place_;
};

struct DummyVarHandle : public VarHandleBase {
  std::string DebugString() const override { return "dummy"; }
};

struct DependencyVarHandle : public VarHandleBase {
  std::string DebugString() const override { return "Dependency Variable"; }
};

struct OpHandle {
  std::vector<VarHandleBase *> inputs_;
  std::vector<VarHandleBase *> outputs_;
  std::unordered_map<platform::Place, platform::DeviceContext *,
                     platform::PlaceHash>
      dev_ctx_;

  std::string DebugString() {
    std::stringstream ss;
    ss << "(";
    for (auto *var : inputs_) {
      ss << var->DebugString() << ", ";
    }
    ss << ") --> (";
    for (auto *var : outputs_) {
      ss << var->DebugString() << ", ";
    }
    ss << ")\n";
    return ss.str();
  }

  virtual ~OpHandle() {}

  virtual void Run() { PADDLE_THROW("Not implemented"); }
  virtual void Wait(platform::DeviceContext *waited_dev) {}
};

struct ComputationOpHandle : public OpHandle {
  std::unique_ptr<OperatorBase> op_;
  Scope *scope_;
  platform::Place place_;

  explicit ComputationOpHandle(const OpDesc &op_desc, Scope *scope,
                               platform::Place place)
      : op_(framework::OpRegistry::CreateOp(op_desc)),
        scope_(scope),
        place_(place) {}

  void Run() override {
    // Wait for other ops if necessary.
    if (platform::is_gpu_place(place_)) {
      int dev_id = boost::get<platform::CUDAPlace>(place_).device;
      cudaSetDevice(dev_id);
    }
    auto *cur_ctx = dev_ctx_[place_];
    for (auto *in : inputs_) {
      if (in->generated_op_ && in->generated_op_->dev_ctx_[place_] != cur_ctx) {
        in->generated_op_->Wait(cur_ctx);
      }
    }

    op_->Run(*scope_, place_);
  }

  void Wait(platform::DeviceContext *waited_dev) override {
    this->dev_ctx_.at(place_)->Wait();
  }
};

struct ScaleLossGradOpHandle : public OpHandle {
  float coeff_;
  Scope *scope_;
  platform::Place place_;

  explicit ScaleLossGradOpHandle(size_t num_dev, Scope *scope,
                                 platform::Place place)
      : coeff_(static_cast<float>(1.0 / num_dev)),
        scope_(scope),
        place_(place) {}

  void Run() override {
    std::string var_name = static_cast<VarHandle *>(this->outputs_[0])->name_;

    float *tmp = scope_->FindVar(var_name)
                     ->GetMutable<framework::LoDTensor>()
                     ->mutable_data<float>(make_ddim({1}), place_);

    if (platform::is_cpu_place(place_)) {
      *tmp = coeff_;
    } else {
      memory::Copy(
          boost::get<platform::CUDAPlace>(place_), tmp, platform::CPUPlace(),
          &coeff_, sizeof(float),
          static_cast<platform::CUDADeviceContext *>(this->dev_ctx_[place_])
              ->stream());
    }
  }

  void Wait(platform::DeviceContext *waited_dev) override {
    this->dev_ctx_.at(place_)->Wait();
  }
};
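
// FetchedData holds the tensors gathered by one Run() call. Each FetchOpHandle
// copies (or shares) the per-device copies of a single fetched variable to CPU
// and lazily merges them into FetchedData when the handle is destroyed.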

struct FetchedData {
 public:
  std::vector<framework::LoDTensor> tensors_;

  explicit FetchedData(size_t num_fetched) { tensors_.resize(num_fetched); }
};

struct FetchOpHandle : public OpHandle {
  std::shared_ptr<FetchedData> data_;
  size_t offset_;
  std::vector<Scope *> *local_scopes_;
  std::vector<LoDTensor> tensors_;

  ~FetchOpHandle() {
    for (auto *input_var : inputs_) {
      input_var->pending_ops_.erase(this);
    }
    for (auto &pair : dev_ctx_) {
      pair.second->Wait();
    }

    // Lazily merge tensors; doing it here makes the code faster.
    MergeTensors();
  }

  void Run() override {
    for (auto *input : inputs_) {
      input->generated_op_->Wait(nullptr);
    }

    tensors_.resize(inputs_.size());
    auto *var = static_cast<VarHandle *>(inputs_[0]);
    auto &var_name = var->name_;
    platform::CPUPlace cpu;
    auto &scopes = *local_scopes_;

    for (size_t i = 0; i < scopes.size(); ++i) {
      auto &scope = scopes[i];
      auto &t = scope->FindVar(var_name)->Get<framework::LoDTensor>();
      if (platform::is_gpu_place(var->place_)) {
        TensorCopy(t, cpu, *dev_ctx_[t.place()], &tensors_[i]);
      } else {
        tensors_[i].ShareDataWith(t);
        tensors_[i].set_lod(t.lod());
      }
    }
  }

  void Wait(platform::DeviceContext *waited_dev) override {
    PADDLE_THROW("Nobody should wait FetchOp. Unexpceted Error");
  }

 private:
  void MergeTensors() const {
    std::vector<const LoDTensor *> tensors_ptr;
    for (auto &t : tensors_) {
      tensors_ptr.emplace_back(&t);
    }
    data_->tensors_[offset_].MergeLoDTensor(tensors_ptr, platform::CPUPlace());
  }
};

class ParallelExecutorPrivate {
 public:
  explicit ParallelExecutorPrivate(size_t num_threads = 12)
      : pool_(num_threads) {}

  std::vector<platform::Place> places_;

  std::vector<Scope *> local_scopes_;
  Scope *global_scope_;

#ifdef PADDLE_WITH_CUDA
  struct NCCLContext {
    std::unique_ptr<platform::CUDADeviceContext> ctx_;
    ncclComm_t comm;

    explicit NCCLContext(int dev_id) {
      ctx_.reset(new platform::CUDADeviceContext(platform::CUDAPlace(dev_id)));
    }

    cudaStream_t stream() const { return ctx_->stream(); }

    int device_id() const {
      return boost::get<platform::CUDAPlace>(ctx_->GetPlace()).device;
    }

    static void InitNCCLContext(std::unordered_map<int, NCCLContext> &contexts,
                                const std::vector<platform::Place> &places) {
      std::vector<ncclComm_t> comms;
      std::vector<int> devs;
      comms.resize(contexts.size());
      devs.reserve(contexts.size());

      for (auto &p : places) {
        devs.push_back(boost::get<platform::CUDAPlace>(p).device);
      }

      NCCL_INVOKE(platform::dynload::ncclCommInitAll(
          &comms[0], static_cast<int>(contexts.size()), &devs[0]));

      int i = 0;
      for (auto &dev_id : devs) {
        contexts.at(dev_id).comm = comms[i++];
      }
    }
  };

  std::unordered_map<int, NCCLContext> communication_streams_;

  NCCLContext &GetNCCLCtx(platform::Place p) {
    int dev_id = boost::get<platform::CUDAPlace>(p).device;
    return communication_streams_.at(dev_id);
  }

#endif

  platform::DeviceContext *CommunicationDevCtx(const platform::Place &place) {
    if (platform::is_cpu_place(place) || local_scopes_.size() == 1) {
      return const_cast<platform::DeviceContext *>(
          platform::DeviceContextPool::Instance().Get(place));
    } else {
#ifdef PADDLE_WITH_CUDA
      return GetNCCLCtx(place).ctx_.get();
#else
      PADDLE_THROW("Not compiled with CUDA")
#endif
    }
  }

  platform::Place main_place_;

  std::unordered_map<platform::Place,
                     std::unordered_map<std::string, std::map<int, VarHandle>>,
                     platform::PlaceHash>
      vars_;
  std::unordered_set<std::unique_ptr<VarHandleBase>> dep_vars_;

  std::vector<std::unique_ptr<OpHandle>> ops_;

  // A simpler thread pool might be faster here.
  ThreadPool pool_;

  std::unique_ptr<platform::EnforceNotMet> exception_;
};

// TODO(yy): Move this function somewhere
ncclDataType_t ToNCCLDataType(std::type_index type) {
  if (type == typeid(float)) {  // NOLINT
    return ncclFloat;
  } else if (type == typeid(double)) {  // NOLINT
    return ncclDouble;
  } else if (type == typeid(int)) {  // NOLINT
    return ncclInt;
  } else {
    PADDLE_THROW("Not supported");
  }
}
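
// All-reduces one variable (a parameter gradient) across all devices inside a
// single ncclGroupStart()/ncclGroupEnd() section. A CUDA event is recorded on
// each communication stream so that downstream ops can wait for the result.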

struct NCCLAllReduceOpHandle : public OpHandle {
  ParallelExecutorPrivate *member_;
  std::vector<cudaEvent_t> events_;

  explicit NCCLAllReduceOpHandle(ParallelExecutorPrivate *member)
      : member_(member) {
    events_.resize(member_->places_.size());
    for (auto &ev : events_) {
      cudaEventCreateWithFlags(&ev, cudaEventDisableTiming);
    }
  }

  ~NCCLAllReduceOpHandle() {
    for (auto &ev : events_) {
      cudaEventDestroy(ev);
    }
  }

  void Run() override {
    if (this->inputs_.size() == 1) {
      return;  // No need to all reduce when GPU count = 1;
    } else {
      auto &var_name = static_cast<VarHandle *>(this->inputs_[0])->name_;

      int dtype = -1;
      size_t numel = 0;

      platform::dynload::ncclGroupStart();

      for (size_t i = 0; i < member_->local_scopes_.size(); ++i) {
        auto &p = member_->places_[i];
        auto *s = member_->local_scopes_[i];
        int dev_id = boost::get<platform::CUDAPlace>(p).device;

        auto &lod_tensor = s->FindVar(var_name)->Get<framework::LoDTensor>();
        void *buffer = const_cast<void *>(lod_tensor.data<void>());
        if (dtype == -1) {
          dtype = ToNCCLDataType(lod_tensor.type());
        }

        if (numel == 0) {
          numel = static_cast<size_t>(lod_tensor.numel());
        }

        auto &nccl_ctx = member_->communication_streams_.at(dev_id);
        cudaSetDevice(dev_id);
        platform::dynload::ncclAllReduce(
            buffer, buffer, numel, static_cast<ncclDataType_t>(dtype), ncclSum,
            nccl_ctx.comm, nccl_ctx.stream());
        cudaEventRecord(events_[i], nccl_ctx.stream());
      }

      platform::dynload::ncclGroupEnd();
    }
  }

  void Wait(platform::DeviceContext *waited_dev) override {
    if (platform::is_cpu_place(
            waited_dev->GetPlace())) {  // Wait by CPU, just sync stream
      for (auto &pair : member_->communication_streams_) {
        pair.second.ctx_->Wait();
      }
    } else {
      if (events_.size() > 1) {
        auto stream =
            static_cast<platform::CUDADeviceContext *>(waited_dev)->stream();
        for (auto &ev : events_) {
          cudaStreamWaitEvent(stream, ev, 0);
        }
      }
    }
  }
};
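
// A rough usage sketch; the program, parameter, and variable names below are
// illustrative only, not taken from this file:
//
//   std::vector<platform::Place> places = {platform::CUDAPlace(0),
//                                          platform::CUDAPlace(1)};
//   ParallelExecutor executor(places, params, startup_prog, main_prog,
//                             "loss" /* loss_var_name */, &scope);
//   executor.Run({"loss"}, "fetched_vars");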

ParallelExecutor::ParallelExecutor(
    const std::vector<platform::Place> &places,
    const std::unordered_set<std::string> &params,
    const ProgramDesc &startup_program, const ProgramDesc &main_program,
    const std::string &loss_var_name, Scope *scope)
    : member_(new ParallelExecutorPrivate()) {
  member_->places_ = places;
  member_->global_scope_ = scope;
  // Step 1. RunStartupProgram and Bcast the params to devs.
  Executor exe(places[0]);
  exe.Run(startup_program, scope, 0);
  // Create local scopes
  for (size_t i = 0; i < member_->places_.size(); ++i) {
    member_->local_scopes_.push_back(&scope->NewScope());
  }
  member_->main_place_ = places[0];

  // Bcast Parameters to all GPUs
  BuildNCCLCommunicator();
  if (platform::is_gpu_place(member_->main_place_) &&
      member_->local_scopes_.size() != 1) {  // Is CUDA
    BCastParamsToGPUs(startup_program);
  }
  // Startup Program has been run. All local scopes have correct parameters.

  // Step 2. Convert main_program to SSA form and dependency graph. Also, insert
  // ncclOp
  ConstructDependencyGraph(params, main_program, loss_var_name);

  // Step 3. Create vars in each scope.
  for (auto *scope : member_->local_scopes_) {
    for (auto *var : main_program.Block(0).AllVars()) {
      if (scope->FindVar(var->Name()) != nullptr) {
        continue;
      }

      InitializeVariable(scope->Var(var->Name()), var->GetType());
    }
  }
}
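
// Builds the SSA-form dependency graph: every op in main_program is replicated
// once per device, a ScaleLossGradOpHandle is appended right after the forward
// loss is produced, and an NCCLAllReduceOpHandle is inserted for every
// parameter gradient generated during the backward pass.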

void ParallelExecutor::ConstructDependencyGraph(
    const std::unordered_set<std::string> &params,
    const ProgramDesc &main_program, const std::string &loss_var_name) const {
  std::unordered_set<std::string> grads;
  for (auto &each_param : params) {
    grads.insert(each_param + "@GRAD");
  }

  bool is_forwarding = true;
  for (auto *op : main_program.Block(0).AllOps()) {
    bool change_forward = false;

    if (!is_forwarding) {
      // FIXME(yy): Do not hard code like this
      if (op->OutputArgumentNames().size() == 1 &&
          op->OutputArgumentNames()[0] == loss_var_name + "@GRAD") {
        continue;  // Drop fill 1. for backward coeff;
      }
    }

    for (size_t i = 0; i < member_->places_.size(); ++i) {
      auto &p = member_->places_[i];
      auto *s = member_->local_scopes_[i];

      member_->ops_.emplace_back(new ComputationOpHandle(*op, s, p));
      auto *op_handle = member_->ops_.back().get();
      op_handle->dev_ctx_[p] = const_cast<platform::DeviceContext *>(
          platform::DeviceContextPool::Instance().Get(p));

      auto var_names = op->InputArgumentNames();

      for (auto &each_var_name : var_names) {
        VarHandle *var = GetVarHandle(each_var_name, p);
        op_handle->inputs_.emplace_back(var);
        var->pending_ops_.emplace(op_handle);
      }
      var_names = op->OutputArgumentNames();

      for (auto &each_var_name : var_names) {
        GenerateVar(op_handle, each_var_name, p);
      }

      if (is_forwarding) {
        if (var_names.size() == 1 && var_names[0] == loss_var_name) {
          // Insert ScaleCost OpHandle
          member_->ops_.emplace_back(new ScaleLossGradOpHandle(
              this->member_->local_scopes_.size(), s, p));
          op_handle = member_->ops_.back().get();

          op_handle->dev_ctx_[p] = member_->CommunicationDevCtx(p);

          // FIXME: Currently ScaleLossGradOp only uses device_count as the
          // scale factor, so it does not depend on any other operators.
          // VarHandle *loss = GetVarHandle(loss_var_name, place);
          // loss->pending_ops_.emplace_back(op_handle);
          // op_handle->inputs_.emplace_back(loss);

          GenerateVar(op_handle, loss_var_name + "@GRAD", p);
          change_forward = true;
        }
      }
    }

    if (change_forward) {
      is_forwarding = false;
    }

    if (!is_forwarding) {
      auto var_names = op->OutputArgumentNames();
      for (auto &og : var_names) {
        if (grads.count(og) != 0) {  // is param grad
          // Insert NCCL AllReduce Op
          member_->ops_.emplace_back(new NCCLAllReduceOpHandle(member_));
          auto *op_handle = member_->ops_.back().get();

          for (size_t i = 0; i < member_->places_.size(); ++i) {
            auto &p = member_->places_[i];
            auto &vars = member_->vars_[p][og];

            if (vars.empty()) {  // This device has no data. continue.
              continue;
            }
            auto *prev_grad = &vars[vars.size() - 1];
            op_handle->inputs_.emplace_back(prev_grad);
            prev_grad->pending_ops_.emplace(op_handle);
            auto &var = vars[vars.size()];
            var.place_ = p;
            var.generated_op_ = op_handle;
            var.name_ = og;
            var.version_ = vars.size() - 1;
            op_handle->outputs_.emplace_back(&var);
            op_handle->dev_ctx_[p] = member_->CommunicationDevCtx(p);
          }
        }
      }
    }
  }

  /*
    Dependency graph has been constructed. However, there are still data
    hazards that need to be handled.
   */
  PolishGraphToSupportDataHarzaeds();
}

/**
 * We only handle write after read (WAR), since a program should not contain
 * write-after-write dependencies. If there are write-after-write operators,
 * we need to prune them.
 *
 * https://en.wikipedia.org/wiki/Hazard_(computer_architecture)#Write_after_read_(WAR)
 */
void ParallelExecutor::PolishGraphToSupportDataHarzaeds() const {
  for (auto &place_pair : member_->vars_) {
    for (auto &name_pair : place_pair.second) {
      if (name_pair.second.size() <= 1) {
        return;
      }
      auto it_new = name_pair.second.rbegin();
      auto it_old = name_pair.second.rbegin();
      ++it_old;
      for (; it_old != name_pair.second.rend(); it_new = it_old, ++it_old) {
        auto *write_op = it_new->second.generated_op_;
        auto &read_ops = it_old->second.pending_ops_;
        auto *ex_write_op = it_old->second.generated_op_;

        if (ex_write_op == nullptr) {  // Nobody writes this var.
          continue;
        }

        for (auto *read_op : read_ops) {
          // Manually add a dependency var from read_op to write_op.
          if (read_op == write_op) {
            // Read and write are performed by the same op.
            continue;
          }

          auto *dep_var = new DependencyVarHandle();

          dep_var->generated_op_ = read_op;
          read_op->outputs_.emplace_back(dep_var);

          dep_var->pending_ops_.emplace(write_op);
          write_op->inputs_.emplace_back(dep_var);
          member_->dep_vars_.emplace(dep_var);
        }
      }
    }
  }
}
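
// Creates a new version of `each_var_name` on `place` and registers op_handle
// as its producer.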

void ParallelExecutor::GenerateVar(OpHandle *op_handle,
                                   const std::string &each_var_name,
                                   const platform::Place &place) const {
  auto &vars = member_->vars_[place][each_var_name];
  size_t version = vars.size();
  auto &var = vars[version];
  var.version_ = version;
  var.generated_op_ = op_handle;
  var.name_ = each_var_name;
  var.place_ = place;
  op_handle->outputs_.emplace_back(&var);
}

VarHandle *ParallelExecutor::GetVarHandle(const std::string &each_var_name,
                                          const platform::Place &place) const {
  auto &var_holders = member_->vars_[place];
  auto &var_holder = var_holders[each_var_name];
  VarHandle *var = nullptr;
  if (var_holder.empty()) {
    auto &init_var = var_holder[0];
    init_var.place_ = place;
    init_var.name_ = each_var_name;
    init_var.generated_op_ = nullptr;
    init_var.version_ = 0;
    var = &init_var;
  } else {
    var = &var_holder.rbegin()->second;
  }
  return var;
}
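
// Broadcasts every LOD_TENSOR variable created by the startup program from
// device 0 to all other devices with ncclBcast, one NCCL group per variable.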

void ParallelExecutor::BCastParamsToGPUs(
    const ProgramDesc &startup_program) const {
#ifdef PADDLE_WITH_CUDA
  auto *main_scope = member_->local_scopes_[0];

  for (auto *var_desc : startup_program.Block(0).AllVars()) {
    if (var_desc->GetType() == proto::VarType::LOD_TENSOR) {
      auto &main_tensor =
          main_scope->FindVar(var_desc->Name())->Get<LoDTensor>();
      ncclDataType_t data_type = ToNCCLDataType(main_tensor.type());
      auto &dims = main_tensor.dims();
      size_t numel = main_tensor.numel();

      platform::dynload::ncclGroupStart();

      for (size_t i = 0; i < member_->places_.size(); ++i) {
        auto place = member_->places_[i];
        void *buffer;
        if (i == 0) {
          buffer = const_cast<void *>(main_tensor.data<void>());
        } else {
          auto local_scope = member_->local_scopes_[i];
          auto *t = local_scope->Var(var_desc->Name())->GetMutable<LoDTensor>();
          t->Resize(dims);
          buffer = t->mutable_data(place, main_tensor.type());
        }

        auto &nccl_ctx = member_->GetNCCLCtx(place);
        platform::dynload::ncclBcast(buffer, numel, data_type, 0, nccl_ctx.comm,
                                     nccl_ctx.stream());
      }
      platform::dynload::ncclGroupEnd();
    }
  }
#else
  PADDLE_THROW("Not compiled with CUDA");
#endif
}

void ParallelExecutor::BuildNCCLCommunicator() const {
#ifdef PADDLE_WITH_CUDA
  for (auto &place : member_->places_) {
    int dev_id = boost::get<platform::CUDAPlace>(place).device;

    member_->communication_streams_.emplace(
        dev_id, ParallelExecutorPrivate::NCCLContext(dev_id));
  }

  ParallelExecutorPrivate::NCCLContext::InitNCCLContext(
      member_->communication_streams_, member_->places_);
#endif
}
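
// Runs the graph in dataflow order: pending_vars tracks which variable handles
// are ready, pending_ops counts how many inputs each op is still waiting for,
// and every op whose inputs are all ready is pushed to the thread pool. Fetch
// ops are created on the fly for fetch_tensors, and their merged results are
// written to fetched_var_name in the global scope.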

void ParallelExecutor::Run(const std::vector<std::string> &fetch_tensors,
                           const std::string &fetched_var_name) {
  auto fetched_data = std::make_shared<FetchedData>(fetch_tensors.size());
  // Version --> VarHandle
  member_->exception_.reset();
  std::unordered_map<VarHandleBase *, std::atomic<bool>> pending_vars;
  std::unordered_map<OpHandle *, size_t> pending_ops;
  std::vector<DummyVarHandle> dummy_vars;

  for (auto &place_pair : member_->vars_) {
    for (auto &name_pair : place_pair.second) {
      for (auto &version_pair : name_pair.second) {
        pending_vars[&version_pair.second] =
            version_pair.second.generated_op_ == nullptr;
      }
    }
  }

  for (auto &var : member_->dep_vars_) {
    pending_vars[var.get()] = var->generated_op_ == nullptr;
  }

  std::vector<OpHandle *> to_run;

  for (auto &op : member_->ops_) {
    if (op->inputs_.empty()) {  // Special case, Op has no input.
      to_run.emplace_back(op.get());
    } else {
      pending_ops.insert({op.get(), op->inputs_.size()});
    }
  }

  std::unordered_map<std::string, std::vector<VarHandleBase *>> fetched_vars;

  for (auto &fetch_var_name : fetch_tensors) {
    for (auto &pair : member_->vars_) {
      auto it = pair.second.find(fetch_var_name);
      if (it != pair.second.end()) {
        fetched_vars[fetch_var_name].push_back(&it->second.rbegin()->second);
      }
    }
  }

  std::vector<FetchOpHandle> fetch_ops;

  for (size_t i = 0; i < fetch_tensors.size(); ++i) {
    auto &var_name = fetch_tensors[i];
    auto &vars = fetched_vars[var_name];
    fetch_ops.emplace_back();
    FetchOpHandle *op = &fetch_ops.back();
    op->data_ = fetched_data;
    op->offset_ = i;
    op->local_scopes_ = &member_->local_scopes_;
    for (auto &p : member_->places_) {
      op->dev_ctx_[p] = member_->GetNCCLCtx(p).ctx_.get();
    }

    for (auto *var : vars) {
      var->pending_ops_.emplace(op);
      op->inputs_.emplace_back(var);
    }

    dummy_vars.emplace_back();
    auto *var = &dummy_vars.back();
    op->outputs_.emplace_back(var);
    var->generated_op_ = op;
    pending_vars[var] = false;

    pending_ops.insert({op, op->inputs_.size()});
  }

  for (auto *op : to_run) {
    RunOp(pending_vars, op);
  }

  while (!pending_vars.empty()) {
    VarHandleBase *ready_var = nullptr;
    for (auto &pair : pending_vars) {
      if (pair.second.load(std::memory_order_consume)) {
        ready_var = pair.first;
      }
    }
    if (ready_var == nullptr) {
      // FIXME: use a condition variable instead of busy waiting.
      if (member_->exception_) {
        throw *member_->exception_;
      }
      continue;
    }
    pending_vars.erase(ready_var);
    to_run.clear();
    for (auto *op : ready_var->pending_ops_) {
      auto &deps = pending_ops[op];
      --deps;
      if (deps == 0) {
        to_run.emplace_back(op);
      }
    }
    for (auto *op : to_run) {
      pending_ops.erase(op);
      RunOp(pending_vars, op);
    }
  }

  fetch_ops.clear();
  *member_->global_scope_->Var(fetched_var_name)->GetMutable<LoDTensorArray>() =
      fetched_data->tensors_;
}
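
// Enqueues a single op on the thread pool. ready_buffer collects pointers to
// the pending flags of the op's outputs; they are set to true after the op
// finishes so that dependent ops can be scheduled.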

void ParallelExecutor::RunOp(
    std::unordered_map<VarHandleBase *, std::atomic<bool>> &pending_vars,
    OpHandle *op) const {
  std::vector<std::atomic<bool> *> *ready_buffer =
      new std::vector<std::atomic<bool> *>();
  for (auto *var : op->outputs_) {
    ready_buffer->emplace_back(&pending_vars[var]);
  }

  auto op_run = [ready_buffer, op, this] {
    try {
      op->Run();
      for (auto *ready : *ready_buffer) {
        ready->store(true, std::memory_order_release);
      }
      delete ready_buffer;
    } catch (platform::EnforceNotMet ex) {
      member_->exception_.reset(new platform::EnforceNotMet(ex));
    } catch (...) {
      LOG(FATAL) << "Unknown exception caught";
    }
  };
  member_->pool_.enqueue(op_run);
}
}  // namespace framework
}  // namespace paddle