//   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/fluid/framework/details/op_handle_base.h"

#include <map>
#include <unordered_set>

namespace paddle {
namespace framework {
namespace details {
std::string OpHandleBase::DebugString() const {
  std::stringstream ss;
23
  ss << Name() << "(";
Y
Yu Yang 已提交
24 25 26 27 28 29 30 31 32 33 34
  for (auto *var : inputs_) {
    ss << var->DebugString() << ", ";
  }
  ss << ") --> (";
  for (auto *var : outputs_) {
    ss << var->DebugString() << ", ";
  }
  ss << ")\n";
  return ss.str();
}

// Releases the per-device CUDA events created by InitCUDA().
// NOTE(review): PADDLE_ENFORCE may throw on a failed cudaEventDestroy;
// throwing from a destructor is risky (terminate during unwinding) —
// confirm this is intended project-wide behavior.
OpHandleBase::~OpHandleBase() {
#ifdef PADDLE_WITH_CUDA
  for (auto &ev : events_) {
    // ev.second may be null if InitCUDA() was never called for this device.
    if (ev.second) {
      PADDLE_ENFORCE(cudaEventDestroy(ev.second));
    }
  }
#endif
}
// Creates one CUDA event per device context and attaches the matching
// event to each VarHandle output, so downstream ops can wait on it.
// No-op when built without CUDA support.
void OpHandleBase::InitCUDA() {
#ifdef PADDLE_WITH_CUDA
  // One timing-disabled event per device this op runs on.
  for (auto &p : dev_ctxes_) {
    int dev_id = boost::get<platform::CUDAPlace>(p.first).device;
    PADDLE_ENFORCE(cudaSetDevice(dev_id));
    PADDLE_ENFORCE(
        cudaEventCreateWithFlags(&events_[dev_id], cudaEventDisableTiming));
  }
  if (IsMultiDeviceTransfer() && dev_ctxes_.size() > 0) {
    // Multi-device op: each output gets the event of its own device.
    for (auto &out_var : outputs_) {
      // Dummy variables (non-VarHandle) carry no data and get no event.
      auto *out_var_handle = dynamic_cast<VarHandle *>(out_var);
      if (out_var_handle) {
        int dev_id =
            boost::get<platform::CUDAPlace>(out_var_handle->place()).device;
        out_var_handle->SetGenerateEvent(events_.at(dev_id));
      }
    }
  } else {
    // Single-device op: all outputs must live on that one device and
    // share its event.
    PADDLE_ENFORCE_EQ(dev_ctxes_.size(), 1UL,
                      "%s should have only one dev_ctx.", Name());
    auto &place = dev_ctxes_.begin()->first;
    int dev_id = boost::get<platform::CUDAPlace>(place).device;
    for (auto &out_var : outputs_) {
      auto *out_var_handle = dynamic_cast<VarHandle *>(out_var);
      if (out_var_handle) {
        PADDLE_ENFORCE(platform::is_same_place(place, out_var_handle->place()),
                       "The place of output(%s) is not consistent with the "
                       "place of current op(%s).",
                       out_var_handle->Name(), Name());
        out_var_handle->SetGenerateEvent(events_.at(dev_id));
      }
    }
  }
#endif
}
80

81 82 83 84 85 86
void OpHandleBase::Run(bool use_cuda) {
#ifdef PADDLE_WITH_CUDA
  if (events_.empty() && use_cuda && dev_ctxes_.size() > 0) {
    InitCUDA();
  }
#else
87
  PADDLE_ENFORCE(!use_cuda);
Y
Yu Yang 已提交
88 89 90 91 92
#endif

  RunImpl();
}

// Makes waited_ctx wait until this op's work is finished.
// CUDA build: if the waiter is a CPU context or this op recorded no
// events, fall back to a blocking Wait() on every device context;
// otherwise enqueue asynchronous stream-waits on the waiter's stream.
// Non-CUDA build: always block on every device context.
void OpHandleBase::RecordWaitEventOnCtx(platform::DeviceContext *waited_ctx) {
#ifdef PADDLE_WITH_CUDA
  PADDLE_ENFORCE_NOT_NULL(waited_ctx);
  if (platform::is_cpu_place(waited_ctx->GetPlace()) || events_.empty()) {
    // Synchronous path: host-side wait on each of this op's contexts.
    for (auto &dev_ctx : dev_ctxes_) {
      PADDLE_ENFORCE_NOT_NULL(dev_ctx.second);
      dev_ctx.second->Wait();
    }
  } else {
    // Asynchronous path: the waiter's stream waits on each recorded
    // event without blocking the host.
    auto stream =
        static_cast<platform::CUDADeviceContext *>(waited_ctx)->stream();
    for (auto &ev : events_) {
      PADDLE_ENFORCE(cudaStreamWaitEvent(stream, ev.second, 0));
    }
  }
#else
  for (auto &dev_ctx : dev_ctxes_) {
    dev_ctx.second->Wait();
  }
#endif
}
Y
Yu Yang 已提交
114 115 116

void OpHandleBase::AddInput(VarHandleBase *in) {
  this->inputs_.emplace_back(in);
X
Xin Pan 已提交
117 118
  node_->inputs.push_back(in->Node());
  in->AddOutput(this, this->Node());
Y
Yu Yang 已提交
119 120 121 122
}

// Registers `out` as an output of this op and wires the reverse edge:
// the underlying graph node and the var handle both learn about this op.
void OpHandleBase::AddOutput(VarHandleBase *out) {
  outputs_.push_back(out);
  node_->outputs.push_back(out->Node());
  out->AddInput(this, Node());
}

// Blocks this op's streams until every input variable has been
// generated by its producer op (via the producer's recorded event).
void OpHandleBase::WaitInputVarGenerated() {
  for (auto in_var : inputs_) {
    if (NeedWait(in_var)) {
      // Dummy Variable is used to represent dependencies between operators, so
      // there doesn't add event for it.
      auto *in_var_handle = dynamic_cast<VarHandle *>(in_var);
      if (in_var_handle) {
        auto &place = in_var_handle->place();
        if (platform::is_gpu_place(place)) {
#ifdef PADDLE_WITH_CUDA
          // Asynchronous wait: our stream on the input's place waits on
          // the producer's event instead of blocking the host.
          auto stream =
              static_cast<platform::CUDADeviceContext *>(dev_ctxes_.at(place))
                  ->stream();
          PADDLE_ENFORCE(
              cudaStreamWaitEvent(stream, in_var_handle->GetEvent(), 0));
#else
          PADDLE_THROW("Doesn't compile the GPU.");
#endif
        }
        // There are nothing to do when the place is CPUPlace.
      }
    }
  }
}

// Place-specific variant of WaitInputVarGenerated().
// NOTE(review): the `place` parameter is never read — the body waits on
// each input at the input's own place, identical to the no-arg overload.
// Confirm whether inputs were meant to be filtered by `place`.
void OpHandleBase::WaitInputVarGenerated(const platform::Place &place) {
  for (auto in_var : inputs_) {
    if (NeedWait(in_var)) {
      // Dummy Variable is used to represent dependencies between operators, so
      // there doesn't add event for it.
      auto *in_var_handle = dynamic_cast<VarHandle *>(in_var);
      if (in_var_handle) {
        if (platform::is_gpu_place(in_var_handle->place())) {
#ifdef PADDLE_WITH_CUDA
          // Stream on the input's own place waits on the producer's event.
          auto stream = static_cast<platform::CUDADeviceContext *>(
                            dev_ctxes_.at(in_var_handle->place()))
                            ->stream();
          PADDLE_ENFORCE(
              cudaStreamWaitEvent(stream, in_var_handle->GetEvent(), 0));
#else
          PADDLE_THROW("Doesn't compile the GPU.");
#endif
        }
        // There are nothing to do when the place is CPUPlace.
      }
    }
  }
}

C
chengduoZH 已提交
176 177 178 179 180 181 182 183 184 185
size_t OpHandleBase::NoDummyInputSize() const {
  size_t cnt = 0;
  for (auto *in : inputs_) {
    if (dynamic_cast<DummyVarHandle *>(in) == nullptr) {
      ++cnt;
    }
  }
  return cnt;
}

C
chengduoZH 已提交
186
bool OpHandleBase::NeedWait(VarHandleBase *in_var) {
X
Xin Pan 已提交
187
  return in_var && in_var->GeneratedOp();
C
chengduoZH 已提交
188 189
}

Y
Yu Yang 已提交
190
void OpHandleBase::RunAndRecordEvent(const std::function<void()> &callback) {
191
  callback();
Y
Yu Yang 已提交
192 193 194
#ifdef PADDLE_WITH_CUDA
  if (!events_.empty()) {  // Use event
    for (auto &p : dev_ctxes_) {
195 196 197 198 199
      auto dev_id = boost::get<platform::CUDAPlace>(p.first).device;
      auto *cuda_dev_ctx = static_cast<platform::CUDADeviceContext *>(p.second);
      VLOG(10) << "cudadevicecontext:" << cuda_dev_ctx << ", dev_id:" << dev_id;
      PADDLE_ENFORCE_CUDA_SUCCESS(
          cudaEventRecord(events_.at(dev_id), cuda_dev_ctx->stream()));
C
chengduoZH 已提交
200 201 202 203 204
    }
  }
#endif
}

205 206
void OpHandleBase::RunAndRecordEvent(platform::Place p,
                                     const std::function<void()> &callback) {
Y
Yu Yang 已提交
207
#ifdef PADDLE_WITH_CUDA
208 209 210 211 212 213 214
  if (platform::is_cpu_place(p) || events_.empty()) {
    callback();
  } else {
    auto *ctx = dev_ctxes_.at(p);
    auto *cuda_ctx = static_cast<platform::CUDADeviceContext *>(ctx);
    cuda_ctx->RecordEvent(events_.at(boost::get<platform::CUDAPlace>(p).device),
                          callback);
Y
Yu Yang 已提交
215
  }
216
#else
Y
Yu Yang 已提交
217
  callback();
218 219 220
#endif
}

Y
Stash  
yuyang18 已提交
221 222 223 224 225 226 227 228 229 230
size_t OpHandleBase::NotReadyInputSize() const {
  std::unordered_set<VarHandleBase *> res;
  for (auto *var : inputs_) {
    if (var->GeneratedOp() != nullptr) {
      res.emplace(var);
    }
  }
  return res.size();
}

231 232 233 234 235 236 237 238 239 240 241
void OpHandleBase::SetLocalExecScopes(
    const std::unordered_map<Scope *, Scope *> &scope_map) {
  local_exec_scopes_.clear();
  auto scopes = GetLocalScopes();
  for (auto *scope : scopes) {
    auto iter = scope_map.find(scope);
    PADDLE_ENFORCE(iter != scope_map.end(), "Local scope not found");
    local_exec_scopes_.emplace_back(iter->second);
  }
}

}  // namespace details
}  // namespace framework
}  // namespace paddle