//   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/fluid/framework/details/op_handle_base.h"

namespace paddle {
namespace framework {
namespace details {
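// Produces a one-line summary of this op: its name followed by the debug
// strings of its input and output variable handles.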
std::string OpHandleBase::DebugString() const {
  std::stringstream ss;
  ss << Name() << "(";
  for (auto *var : inputs_) {
    ss << var->DebugString() << ", ";
  }
  ss << ") --> (";
  for (auto *var : outputs_) {
    ss << var->DebugString() << ", ";
  }
  ss << ")\n";
  return ss.str();
}

OpHandleBase::~OpHandleBase() PADDLE_MAY_THROW {
#ifdef PADDLE_WITH_CUDA
  for (auto &ev : events_) {
    if (ev.second) {
      PADDLE_ENFORCE_CUDA_SUCCESS(cudaEventDestroy(ev.second));
    }
  }
#endif
}

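// Creates one cudaEvent per device context of this op and registers it on the
// output variable handles as their "generate event", so that consumers running
// on other streams can wait for this op to finish.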
void OpHandleBase::InitCUDA() {
#ifdef PADDLE_WITH_CUDA
  for (auto &p : dev_ctxes_) {
    int dev_id = BOOST_GET_CONST(platform::CUDAPlace, p.first).device;
    platform::SetDeviceId(dev_id);
    PADDLE_ENFORCE_CUDA_SUCCESS(
        cudaEventCreateWithFlags(&events_[dev_id], cudaEventDisableTiming));
  }
  if (IsMultiDeviceTransfer() && dev_ctxes_.size() > 0) {
    for (auto &out_var : outputs_) {
      auto *out_var_handle = dynamic_cast<VarHandle *>(out_var);
      if (out_var_handle) {
        int dev_id =
            BOOST_GET_CONST(platform::CUDAPlace, out_var_handle->place())
                .device;
        out_var_handle->SetGenerateEvent(events_.at(dev_id));
      }
    }
  } else {
    PADDLE_ENFORCE_EQ(
        dev_ctxes_.size(), 1UL,
        platform::errors::InvalidArgument(
            "Operator %s should have only one dev_ctx, but got %d.", Name(),
            dev_ctxes_.size()));
    auto &place = dev_ctxes_.begin()->first;
    int dev_id = BOOST_GET_CONST(platform::CUDAPlace, place).device;
    for (auto &out_var : outputs_) {
      auto *out_var_handle = dynamic_cast<VarHandle *>(out_var);
      if (out_var_handle) {
        PADDLE_ENFORCE_EQ(
            platform::is_same_place(place, out_var_handle->place()), true,
            platform::errors::InvalidArgument(
                "The place of output(%s) is not consistent with the "
                "place of current op(%s).",
                out_var_handle->Name(), Name()));
        out_var_handle->SetGenerateEvent(events_.at(dev_id));
      }
    }
  }
#else
  PADDLE_THROW(platform::errors::PermissionDenied(
      "Paddle can't use CUDA device since it's not compiled with CUDA. "
      "Please recompile or reinstall Paddle with GPU support."));
#endif
}

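// XPU counterpart of InitCUDA. XPU does not support stream events yet, so this
// only checks the device placement of the op and of its outputs.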
void OpHandleBase::InitXPU() {
#ifdef PADDLE_WITH_XPU
  if (IsMultiDeviceTransfer() && dev_ctxes_.size() > 0) {
    for (auto &out_var : outputs_) {
      auto *out_var_handle = dynamic_cast<VarHandle *>(out_var);
      if (out_var_handle) {
        // TODO(liuyuhui): XPU does not support sync events yet; add them later.
      }
    }
  } else {
    PADDLE_ENFORCE_EQ(dev_ctxes_.size(), 1UL,
                      platform::errors::InvalidArgument(
                          "%s should have only one dev_ctx.", Name()));
    auto &place = dev_ctxes_.begin()->first;
    int dev_id = BOOST_GET_CONST(platform::XPUPlace, place).device;
    PADDLE_ENFORCE_EQ(
        xpu_set_device(dev_id), XPU_SUCCESS,
        platform::errors::PreconditionNotMet("xpu_set_device failed"));
    for (auto &out_var : outputs_) {
      auto *out_var_handle = dynamic_cast<VarHandle *>(out_var);
      if (out_var_handle) {
        PADDLE_ENFORCE_EQ(
            platform::is_same_place(place, out_var_handle->place()), true,
            platform::errors::InvalidArgument(
                "The place of output(%s) is not consistent with the "
                "place of current op(%s).",
                out_var_handle->Name(), Name()));
      }
    }
  }
#else
  PADDLE_THROW(platform::errors::PermissionDenied(
      "Paddle can't use XPU device since it's not compiled with XPU. "
      "Please recompile or reinstall Paddle with XPU support."));
#endif
}

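// Entry point used by the executor: lazily initializes device events on the
// first CUDA/XPU run, optionally skips execution (inplace_addto_op_pass), and
// finally dispatches to RunImpl().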
void OpHandleBase::Run(DeviceType use_device) {
#ifdef PADDLE_WITH_CUDA
  if (events_.empty() && use_device == p::kCUDA && dev_ctxes_.size() > 0) {
    InitCUDA();
  }
#else
  PADDLE_ENFORCE_NE(
      use_device, p::kCUDA,
      platform::errors::InvalidArgument(
          "Argument use_device should not be kCUDA when Paddle is not "
          "compiled with CUDA."));
#endif

  if (use_device == p::kXPU && dev_ctxes_.size() > 0) {
#ifdef PADDLE_WITH_XPU
    InitXPU();
#else
    PADDLE_ENFORCE_NE(
        use_device, p::kXPU,
        platform::errors::InvalidArgument(
            "Argument use_device should not be kXPU when Paddle is not "
            "compiled with XPU."));
#endif
  }

  // Skip running the current op; used together with inplace_addto_op_pass.
  if (skip_running_) {
    VLOG(4) << "skip running: " << Name();
    return;
  }

  RunImpl();
}

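// Makes waited_ctx wait for this op to finish: on CPU places (or when no
// events were created) it blocks on every device context, otherwise it
// enqueues cudaStreamWaitEvent calls on the waited stream.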
void OpHandleBase::RecordWaitEventOnCtx(platform::DeviceContext *waited_ctx) {
#ifdef PADDLE_WITH_CUDA
  PADDLE_ENFORCE_NOT_NULL(waited_ctx, platform::errors::InvalidArgument(
                                          "Argument waited_ctx is NULL."));
  if (platform::is_cpu_place(waited_ctx->GetPlace()) || events_.empty()) {
    for (auto &dev_ctx : dev_ctxes_) {
      PADDLE_ENFORCE_NOT_NULL(
          dev_ctx.second,
          platform::errors::InvalidArgument("The device context is NULL."));
      dev_ctx.second->Wait();
    }
  } else {
    auto stream =
        static_cast<platform::CUDADeviceContext *>(waited_ctx)->stream();
    for (auto &ev : events_) {
      PADDLE_ENFORCE_CUDA_SUCCESS(cudaStreamWaitEvent(stream, ev.second, 0));
    }
  }
#else
  for (auto &dev_ctx : dev_ctxes_) {
    dev_ctx.second->Wait();
  }
#endif
}

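// AddInput/AddOutput keep the handle lists and the underlying ir::Node edges
// consistent on both the op side and the variable side.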
void OpHandleBase::AddInput(VarHandleBase *in) {
  this->inputs_.emplace_back(in);
  node_->inputs.push_back(in->Node());
  in->AddOutput(this, this->Node());
}

void OpHandleBase::AddOutput(VarHandleBase *out) {
  outputs_.emplace_back(out);
  node_->outputs.push_back(out->Node());
  out->AddInput(this, this->Node());
}

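// Makes this op's streams wait until all of its inputs have been generated,
// using the events recorded by the producing ops.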
void OpHandleBase::WaitInputVarGenerated(bool wait_for_feed) {
  for (auto in_var : inputs_) {
    if (NeedWait(in_var)) {
      // Dummy variables only represent dependencies between operators, so no
      // event is recorded for them.
      auto *in_var_handle = dynamic_cast<VarHandle *>(in_var);
      if (in_var_handle) {
        auto &place = in_var_handle->place();
        if (platform::is_gpu_place(place)) {
#ifdef PADDLE_WITH_CUDA
          auto stream =
              static_cast<platform::CUDADeviceContext *>(dev_ctxes_.at(place))
                  ->stream();
          PADDLE_ENFORCE_CUDA_SUCCESS(
              cudaStreamWaitEvent(stream, in_var_handle->GetEvent(), 0));
#else
          PADDLE_THROW(
              platform::errors::PreconditionNotMet("Not compiled with CUDA."));
#endif
        }
        // There is nothing to do when the place is CPUPlace.
      }
    } else {
      // NOTE(zhiqiu): A special case: using fetch_async_op_handle may lead to
      // non-determinism due to parallel execution of CUDA memory operations,
      // e.g.:
      // execute stream: CPU->GPU copy (feed)
      // fetch stream: GPU->CUDAPinned (fetch)
      if (in_var && wait_for_feed) {
        auto *in_var_handle = dynamic_cast<VarHandle *>(in_var);
        if (in_var_handle) {
          auto &place = in_var_handle->place();
          if (platform::is_gpu_place(place)) {
#ifdef PADDLE_WITH_CUDA
            platform::DeviceContextPool &pool =
                platform::DeviceContextPool::Instance();
            auto stream =
                static_cast<platform::CUDADeviceContext *>(pool.Get(place))
                    ->stream();
            PADDLE_ENFORCE_CUDA_SUCCESS(cudaStreamSynchronize(stream));
#else
            PADDLE_THROW(platform::errors::PreconditionNotMet(
                "Not compiled with CUDA."));
#endif
          }
        }
      }
    }
  }
}

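// Overload that waits for the inputs on the streams of their own device
// contexts (GPU inputs only; CPU inputs need no synchronization).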
void OpHandleBase::WaitInputVarGenerated(const platform::Place &place) {
  for (auto in_var : inputs_) {
    if (NeedWait(in_var)) {
      // Dummy variables only represent dependencies between operators, so no
      // event is recorded for them.
      auto *in_var_handle = dynamic_cast<VarHandle *>(in_var);
      if (in_var_handle) {
        if (platform::is_gpu_place(in_var_handle->place())) {
#ifdef PADDLE_WITH_CUDA
          auto stream = static_cast<platform::CUDADeviceContext *>(
                            dev_ctxes_.at(in_var_handle->place()))
                            ->stream();
          PADDLE_ENFORCE_CUDA_SUCCESS(
              cudaStreamWaitEvent(stream, in_var_handle->GetEvent(), 0));
#else
          PADDLE_THROW(
              platform::errors::PreconditionNotMet("Not compiled with CUDA."));
#endif
        }
        // There is nothing to do when the place is CPUPlace.
      }
    }
  }
}

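// Number of inputs that are real variables, i.e. not DummyVarHandle
// dependency edges.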
size_t OpHandleBase::NoDummyInputSize() const {
  size_t cnt = 0;
  for (auto *in : inputs_) {
    if (dynamic_cast<DummyVarHandle *>(in) == nullptr) {
      ++cnt;
    }
  }
  return cnt;
}

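// An input only needs to be waited for if it is produced by another op.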
bool OpHandleBase::NeedWait(VarHandleBase *in_var) {
  return in_var && in_var->GeneratedOp();
}

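// Runs the callback and then records this op's event on every device stream,
// so that downstream ops can synchronize on the result.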
void OpHandleBase::RunAndRecordEvent(const std::function<void()> &callback) {
  callback();
#ifdef PADDLE_WITH_CUDA
  if (!events_.empty()) {  // Use event
    for (auto &p : dev_ctxes_) {
      auto dev_id = BOOST_GET_CONST(platform::CUDAPlace, p.first).device;
      auto *cuda_dev_ctx = static_cast<platform::CUDADeviceContext *>(p.second);
      VLOG(10) << "cudadevicecontext:" << cuda_dev_ctx << ", dev_id:" << dev_id;
      PADDLE_ENFORCE_CUDA_SUCCESS(
          cudaEventRecord(events_.at(dev_id), cuda_dev_ctx->stream()));
    }
  }
#endif
}

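// Place-specific variant: on CPU places (or without events) the callback runs
// directly; otherwise it is wrapped by CUDADeviceContext::RecordEvent so the
// event of the given place is recorded on its stream.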
void OpHandleBase::RunAndRecordEvent(platform::Place p,
                                     const std::function<void()> &callback) {
#ifdef PADDLE_WITH_CUDA
  if (platform::is_cpu_place(p) || events_.empty()) {
    callback();
  } else {
    auto *ctx = dev_ctxes_.at(p);
    auto *cuda_ctx = static_cast<platform::CUDADeviceContext *>(ctx);
    cuda_ctx->RecordEvent(
        events_.at(BOOST_GET_CONST(platform::CUDAPlace, p).device), callback);
  }
#else
  callback();
#endif
}

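// Counts the distinct inputs that are produced by other ops and therefore may
// not be ready yet.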
size_t OpHandleBase::NotReadyInputSize() const {
  std::unordered_set<VarHandleBase *> res;
  for (auto *var : inputs_) {
    if (var->GeneratedOp() != nullptr) {
      res.emplace(var);
    }
  }
  return res.size();
}

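// Translates each scope returned by GetLocalScopes() into its execution scope
// via scope_map and caches the result in local_exec_scopes_.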
void OpHandleBase::SetLocalExecScopes(
    const std::unordered_map<Scope *, Scope *> &scope_map) {
  local_exec_scopes_.clear();
  auto scopes = GetLocalScopes();
  for (auto *scope : scopes) {
    auto iter = scope_map.find(scope);
333 334 335
    PADDLE_ENFORCE_NE(
        iter, scope_map.end(),
        platform::errors::NotFound("Local scope not found in scope map."));
336 337 338 339
    local_exec_scopes_.emplace_back(iter->second);
  }
}

}  // namespace details
}  // namespace framework
}  // namespace paddle