//   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/fluid/framework/details/op_handle_base.h"
#include <map>

namespace paddle {
namespace framework {
namespace details {
std::string OpHandleBase::DebugString() const {
  std::stringstream ss;
  ss << "(";
  for (auto *var : inputs_) {
    ss << var->DebugString() << ", ";
  }
  ss << ") --> (";
  for (auto *var : outputs_) {
    ss << var->DebugString() << ", ";
  }
  ss << ")\n";
  return ss.str();
}

Y
Yu Yang 已提交
34 35 36
OpHandleBase::~OpHandleBase() {
#ifdef PADDLE_WITH_CUDA
  for (auto &ev : events_) {
Y
Yu Yang 已提交
37
    PADDLE_ENFORCE(cudaEventDestroy(ev.second));
Y
Yu Yang 已提交
38 39 40
  }
#endif
}
Y
Yu Yang 已提交
41

42
void OpHandleBase::Run(bool use_cuda) {
Y
Yu Yang 已提交
43
#ifdef PADDLE_WITH_CUDA
44
  if (events_.empty() && use_cuda) {
Y
Yu Yang 已提交
45
    for (auto &p : dev_ctxes_) {
Y
Yu Yang 已提交
46
      int dev_id = boost::get<platform::CUDAPlace>(p.first).device;
Y
Yu Yang 已提交
47 48 49
      PADDLE_ENFORCE(cudaSetDevice(dev_id));
      PADDLE_ENFORCE(
          cudaEventCreateWithFlags(&events_[dev_id], cudaEventDisableTiming));
Y
Yu Yang 已提交
50 51 52
    }
  }
#else
53
  PADDLE_ENFORCE(!use_cuda);
Y
Yu Yang 已提交
54 55 56 57 58
#endif

  RunImpl();
}

C
chengduoZH 已提交
59
void OpHandleBase::RecordWaitEventOnCtx(platform::DeviceContext *waited_ctx) {
Y
Yu Yang 已提交
60
#ifdef PADDLE_WITH_CUDA
C
chengduoZH 已提交
61
  if (platform::is_cpu_place(waited_ctx->GetPlace()) || events_.empty()) {
Y
Yu Yang 已提交
62
    for (auto &dev_ctx : dev_ctxes_) {
Y
Yu Yang 已提交
63 64 65 66
      dev_ctx.second->Wait();
    }
  } else {
    auto stream =
C
chengduoZH 已提交
67
        static_cast<platform::CUDADeviceContext *>(waited_ctx)->stream();
Y
Yu Yang 已提交
68 69 70 71 72
    for (auto &ev : events_) {
      PADDLE_ENFORCE(cudaStreamWaitEvent(stream, ev.second, 0));
    }
  }
#else
Y
Yu Yang 已提交
73
  for (auto &dev_ctx : dev_ctxes_) {
Y
Yu Yang 已提交
74 75 76 77
    dev_ctx.second->Wait();
  }
#endif
}
Y
Yu Yang 已提交
78 79 80 81 82 83 84 85 86 87 88

void OpHandleBase::AddInput(VarHandleBase *in) {
  this->inputs_.emplace_back(in);
  in->pending_ops_.insert(this);
}

// Registers `out` as an output of this op and marks this op as its
// producer (the two statements are order-independent).
void OpHandleBase::AddOutput(VarHandleBase *out) {
  out->generated_op_ = this;
  outputs_.push_back(out);
}

C
chengduoZH 已提交
89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106
void OpHandleBase::WaitInputVarGenerated() {
  for (auto in_var : inputs_) {
    if (NeedWait(in_var)) {
      for (auto &pair : dev_ctxes_) {
        in_var->generated_op_->RecordWaitEventOnCtx(pair.second);
      }
    }
  }
}

// Like WaitInputVarGenerated(), but waits only on the context bound to
// `place`.
void OpHandleBase::WaitInputVarGenerated(const platform::Place &place) {
  for (auto *in : inputs_) {
    if (NeedWait(in)) {
      // Use at() so an unknown place fails loudly (out_of_range) instead
      // of operator[] silently inserting a null context and crashing in
      // the callee; this also matches RunAndRecordEvent's lookup style.
      in->generated_op_->RecordWaitEventOnCtx(dev_ctxes_.at(place));
    }
  }
}

C
chengduoZH 已提交
107 108 109 110 111 112 113 114 115 116
size_t OpHandleBase::NoDummyInputSize() const {
  size_t cnt = 0;
  for (auto *in : inputs_) {
    if (dynamic_cast<DummyVarHandle *>(in) == nullptr) {
      ++cnt;
    }
  }
  return cnt;
}

C
chengduoZH 已提交
117
bool OpHandleBase::NeedWait(VarHandleBase *in_var) {
C
chengduoZH 已提交
118
  return in_var && in_var->generated_op_;
C
chengduoZH 已提交
119 120
}

Y
Yu Yang 已提交
121 122 123 124
void OpHandleBase::RunAndRecordEvent(const std::function<void()> &callback) {
#ifdef PADDLE_WITH_CUDA
  if (!events_.empty()) {  // Use event
    std::function<void()> method = callback;
125 126 127
    // NOTE(zcd): device context must be ordered here because RecordEvent
    // will use a mutex to ensure the safe of multi-threads.
    std::map<platform::DeviceContext *, platform::Place> ordered_ctxes;
Y
Yu Yang 已提交
128
    for (auto &p : dev_ctxes_) {
129
      ordered_ctxes.emplace(p.second, p.first);
Y
Yu Yang 已提交
130
    }
131
    for (auto &p : ordered_ctxes) {
C
chengduoZH 已提交
132
      method = [method, p, this]() {
133 134 135
        static_cast<platform::CUDADeviceContext *>(p.first)->RecordEvent(
            events_.at(boost::get<platform::CUDAPlace>(p.second).device),
            method);
C
chengduoZH 已提交
136 137 138 139 140 141 142 143 144 145 146
      };
    }
    method();
  } else {
#endif
    callback();
#ifdef PADDLE_WITH_CUDA
  }
#endif
}

147 148
void OpHandleBase::RunAndRecordEvent(platform::Place p,
                                     const std::function<void()> &callback) {
Y
Yu Yang 已提交
149
#ifdef PADDLE_WITH_CUDA
150 151 152 153 154 155 156
  if (platform::is_cpu_place(p) || events_.empty()) {
    callback();
  } else {
    auto *ctx = dev_ctxes_.at(p);
    auto *cuda_ctx = static_cast<platform::CUDADeviceContext *>(ctx);
    cuda_ctx->RecordEvent(events_.at(boost::get<platform::CUDAPlace>(p).device),
                          callback);
Y
Yu Yang 已提交
157
  }
158
#else
Y
Yu Yang 已提交
159
  callback();
160 161 162
#endif
}

}  // namespace details
}  // namespace framework
}  // namespace paddle