//   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/fluid/framework/details/op_handle_base.h"

namespace paddle {
namespace framework {
namespace details {

// GetTensorFromVar is used in both the broadcast_op handle and the gather_op
// handle, so it should be placed in a common place. An appropriate location
// has not been found yet, so it is temporarily placed in op_handle_base.
Tensor *GetTensorFromVar(Variable *in_var) {
  if (in_var->IsType<LoDTensor>()) {
    return in_var->GetMutable<LoDTensor>();
  } else if (in_var->IsType<SelectedRows>()) {
    return in_var->GetMutable<SelectedRows>()->mutable_value();
  } else {
    PADDLE_THROW("Var should be LoDTensor or SelectedRows");
  }
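  // Not reached: every branch above either returns or throws; kept to satisfy
  // compilers that expect a return value on all paths.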
  return nullptr;
}

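// DebugString renders this op as "(input vars, ...) --> (output vars, ...)".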
std::string OpHandleBase::DebugString() const {
  std::stringstream ss;
  ss << "(";
  for (auto *var : inputs_) {
    ss << var->DebugString() << ", ";
  }
  ss << ") --> (";
  for (auto *var : outputs_) {
    ss << var->DebugString() << ", ";
  }
  ss << ")\n";
  return ss.str();
}

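// Destroy the per-device CUDA events lazily created in Run().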
OpHandleBase::~OpHandleBase() {
#ifdef PADDLE_WITH_CUDA
  for (auto &ev : events_) {
    PADDLE_ENFORCE(cudaEventDestroy(ev.second));
  }
#endif
}

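// Run executes RunImpl(). When use_event is true (CUDA builds only), a
// timing-disabled CUDA event is lazily created for every device this op runs
// on, and after RunImpl() the events are recorded on each device's stream so
// that Wait() can synchronize other streams on this op's completion. In
// non-CUDA builds, use_event must be false.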
void OpHandleBase::Run(bool use_event) {
#ifdef PADDLE_WITH_CUDA
  if (events_.empty() && use_event) {
    for (auto &p : dev_ctxes_) {
      int dev_id = boost::get<platform::CUDAPlace>(p.first).device;
      PADDLE_ENFORCE(cudaSetDevice(dev_id));
      PADDLE_ENFORCE(
          cudaEventCreateWithFlags(&events_[dev_id], cudaEventDisableTiming));
    }
  }
#else
  PADDLE_ENFORCE(!use_event);
#endif

  RunImpl();

#ifdef PADDLE_WITH_CUDA
  if (use_event) {
    for (auto &p : dev_ctxes_) {
      int dev_id = boost::get<platform::CUDAPlace>(p.first).device;
      auto stream =
          static_cast<platform::CUDADeviceContext *>(p.second)->stream();
      PADDLE_ENFORCE(cudaEventRecord(events_.at(dev_id), stream));
    }
  }
#endif
}

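// Wait blocks waited_dev until this op has finished. If waited_dev is a CPU
// context, or no events have been recorded, every device context is
// synchronized on the host; otherwise the recorded events are queued on
// waited_dev's stream via cudaStreamWaitEvent, so the host does not block.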
void OpHandleBase::Wait(platform::DeviceContext *waited_dev) {
#ifdef PADDLE_WITH_CUDA
  if (platform::is_cpu_place(waited_dev->GetPlace()) || events_.empty()) {
    for (auto &dev_ctx : dev_ctxes_) {
      dev_ctx.second->Wait();
    }
  } else {
    auto stream =
        static_cast<platform::CUDADeviceContext *>(waited_dev)->stream();
    for (auto &ev : events_) {
      PADDLE_ENFORCE(cudaStreamWaitEvent(stream, ev.second, 0));
    }
  }
#else
  for (auto &dev_ctx : dev_ctxes_) {
    dev_ctx.second->Wait();
  }
#endif
}

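// AddInput/AddOutput maintain the dependency graph: an input variable keeps
// this op in its pending_ops_, while an output variable records this op as
// its generated_op_.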
void OpHandleBase::AddInput(VarHandleBase *in) {
  this->inputs_.emplace_back(in);
  in->pending_ops_.insert(this);
}

void OpHandleBase::AddOutput(VarHandleBase *out) {
  outputs_.emplace_back(out);
  out->generated_op_ = this;
}

}  // namespace details
}  // namespace framework
}  // namespace paddle