//   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/fluid/framework/details/op_handle_base.h"

namespace paddle {
namespace framework {
namespace details {
std::string OpHandleBase::DebugString() const {
  std::stringstream ss;
  ss << "(";
  for (auto *var : inputs_) {
    ss << var->DebugString() << ", ";
  }
  ss << ") --> (";
  for (auto *var : outputs_) {
    ss << var->DebugString() << ", ";
  }
  ss << ")\n";
  return ss.str();
}

Y
Yu Yang 已提交
34 35 36
OpHandleBase::~OpHandleBase() {
#ifdef PADDLE_WITH_CUDA
  for (auto &ev : events_) {
Y
Yu Yang 已提交
37
    PADDLE_ENFORCE(cudaEventDestroy(ev.second));
Y
Yu Yang 已提交
38 39 40
  }
#endif
}
Y
Yu Yang 已提交
41 42 43 44

void OpHandleBase::Run(bool use_event) {
#ifdef PADDLE_WITH_CUDA
  if (events_.empty() && use_event) {
Y
Yu Yang 已提交
45
    for (auto &p : dev_ctxes_) {
Y
Yu Yang 已提交
46
      int dev_id = boost::get<platform::CUDAPlace>(p.first).device;
Y
Yu Yang 已提交
47 48 49
      PADDLE_ENFORCE(cudaSetDevice(dev_id));
      PADDLE_ENFORCE(
          cudaEventCreateWithFlags(&events_[dev_id], cudaEventDisableTiming));
Y
Yu Yang 已提交
50 51 52 53 54 55 56 57 58 59
    }
  }
#else
  PADDLE_ENFORCE(!use_event);
#endif

  RunImpl();

#ifdef PADDLE_WITH_CUDA
  if (use_event) {
Y
Yu Yang 已提交
60
    for (auto &p : dev_ctxes_) {
Y
Yu Yang 已提交
61 62 63
      int dev_id = boost::get<platform::CUDAPlace>(p.first).device;
      auto stream =
          static_cast<platform::CUDADeviceContext *>(p.second)->stream();
Y
Yu Yang 已提交
64
      PADDLE_ENFORCE(cudaEventRecord(events_.at(dev_id), stream));
Y
Yu Yang 已提交
65 66 67 68 69 70 71 72
    }
  }
#endif
}

// Makes `waited_dev` wait until this op has finished.
//
// CUDA builds: if the waiter is a CPU context (or no events were recorded),
// fall back to a blocking host-side Wait() on every device context this op
// ran on; otherwise enqueue cudaStreamWaitEvent on the waiter's stream for
// each recorded event, which is asynchronous with respect to the host.
// CPU-only builds: always block on every device context.
void OpHandleBase::Wait(platform::DeviceContext *waited_dev) {
#ifdef PADDLE_WITH_CUDA
  if (platform::is_cpu_place(waited_dev->GetPlace()) || events_.empty()) {
    for (auto &dev_ctx : dev_ctxes_) {
      dev_ctx.second->Wait();
    }
  } else {
    auto stream =
        static_cast<platform::CUDADeviceContext *>(waited_dev)->stream();
    for (auto &ev : events_) {
      PADDLE_ENFORCE(cudaStreamWaitEvent(stream, ev.second, 0));
    }
  }
#else
  for (auto &dev_ctx : dev_ctxes_) {
    dev_ctx.second->Wait();
  }
#endif
}

// Registers `in` as an input of this op, and this op as a pending consumer
// of `in`, wiring both directions of the dependency graph edge.
void OpHandleBase::AddInput(VarHandleBase *in) {
  in->pending_ops_.insert(this);
  inputs_.push_back(in);
}

// Registers `out` as an output of this op and marks this op as the one that
// generates `out`, wiring both directions of the dependency graph edge.
void OpHandleBase::AddOutput(VarHandleBase *out) {
  out->generated_op_ = this;
  this->outputs_.push_back(out);
}

}  // namespace details
}  // namespace framework
}  // namespace paddle