//   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/fluid/framework/details/op_handle_base.h"

namespace paddle {
namespace framework {
namespace details {
// Render this op as "(in1, in2, ...) --> (out1, out2, ...)\n" by
// concatenating the debug strings of its input and output var handles.
std::string OpHandleBase::DebugString() const {
  std::stringstream out;
  out << "(";
  for (auto *in_var : inputs_) {
    out << in_var->DebugString() << ", ";
  }
  out << ") --> (";
  for (auto *out_var : outputs_) {
    out << out_var->DebugString() << ", ";
  }
  out << ")\n";
  return out.str();
}

OpHandleBase::~OpHandleBase() {
#ifdef PADDLE_WITH_CUDA
  // Drain all in-flight work on every device context first, so no stream
  // can still reference the events we are about to destroy.
  for (auto &ctx : dev_ctx_) {
    ctx.second->Wait();
  }
  for (auto &ev : events_) {
    PADDLE_ENFORCE(cudaEventDestroy(ev.second));
  }
#endif
}

// Execute the op via RunImpl().  When `use_event` is true (CUDA builds
// only), a cudaEvent is recorded on each device's stream after RunImpl()
// so that other streams can later wait on this op's completion (see
// Wait()).  In CPU-only builds `use_event` must be false.
void OpHandleBase::Run(bool use_event) {
#ifdef PADDLE_WITH_CUDA
  // Lazily create one timing-disabled event per device on the first
  // event-tracked run; events_ is keyed by device id.
  if (events_.empty() && use_event) {
    for (auto &p : dev_ctx_) {
      int dev_id = boost::get<platform::CUDAPlace>(p.first).device;
      PADDLE_ENFORCE(cudaSetDevice(dev_id));
      PADDLE_ENFORCE(
          cudaEventCreateWithFlags(&events_[dev_id], cudaEventDisableTiming));
    }
  }
#else
  PADDLE_ENFORCE(!use_event);
#endif

  RunImpl();

#ifdef PADDLE_WITH_CUDA
  if (use_event) {
    // Mark completion of RunImpl() on every device's stream.
    for (auto &p : dev_ctx_) {
      int dev_id = boost::get<platform::CUDAPlace>(p.first).device;
      auto stream =
          static_cast<platform::CUDADeviceContext *>(p.second)->stream();
      PADDLE_ENFORCE(cudaEventRecord(events_.at(dev_id), stream));
    }
  }
#endif
}

// Make `waited_dev` wait until this op's work is complete.  If the waiter
// is a GPU context and events were recorded, the wait is enqueued on the
// waiter's stream (non-blocking for the host); otherwise every device
// context is synchronously drained.
void OpHandleBase::Wait(platform::DeviceContext *waited_dev) {
#ifdef PADDLE_WITH_CUDA
  if (!platform::is_cpu_place(waited_dev->GetPlace()) && !events_.empty()) {
    auto stream =
        static_cast<platform::CUDADeviceContext *>(waited_dev)->stream();
    for (auto &ev : events_) {
      PADDLE_ENFORCE(cudaStreamWaitEvent(stream, ev.second, 0));
    }
  } else {
    for (auto &ctx_pair : dev_ctx_) {
      ctx_pair.second->Wait();
    }
  }
#else
  for (auto &ctx_pair : dev_ctx_) {
    ctx_pair.second->Wait();
  }
#endif
}

// Register `in` as an input of this op, and record the reverse edge so
// the variable knows this op is pending on it.
void OpHandleBase::AddInput(VarHandleBase *in) {
  in->pending_ops_.insert(this);
  inputs_.push_back(in);
}

// Register `out` as an output of this op and mark this op as its producer.
void OpHandleBase::AddOutput(VarHandleBase *out) {
  out->generated_op_ = this;
  outputs_.push_back(out);
}

}  // namespace details
}  // namespace framework
}  // namespace paddle