/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/platform/device_context.h"

#include <memory>
#include <set>

#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
#include "paddle/fluid/memory/allocation/cuda_device_context_allocator.h"
#include "paddle/fluid/platform/cuda_device_guard.h"
#endif
#ifdef PADDLE_WITH_MLU
#include "paddle/fluid/platform/device/mlu/device_context.h"
#include "paddle/fluid/platform/device/mlu/device_context_allocator.h"
#endif
#ifdef PADDLE_WITH_IPU
#include "paddle/fluid/platform/ipu/ipu_backend.h"
#endif
#include "glog/logging.h"
#include "paddle/fluid/framework/expect.h"
#include "paddle/fluid/platform/profiler.h"

namespace paddle {
namespace memory {

AllocationPtr Alloc(const platform::DeviceContext& dev_ctx, size_t size) {
  auto place = dev_ctx.GetPlace();
  if (size == 0) {
    return Alloc(place, size);
  }

  if (platform::is_gpu_place(place)) {
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
    auto* default_dev_ctx = static_cast<platform::CUDADeviceContext*>(
        platform::DeviceContextPool::Instance().Get(place));
    auto& desired_dev_ctx =
        static_cast<const platform::CUDADeviceContext&>(dev_ctx);
    if (default_dev_ctx->stream() == desired_dev_ctx.stream()) {
      return Alloc(place, size);
    } else {
      return allocation::CUDADeviceContextAllocatorPool::Instance().Alloc(
          desired_dev_ctx, size);
    }
#else
    PADDLE_THROW(platform::errors::PermissionDenied(
        "Paddle can't use CUDA device since it's not compiled with CUDA. "
        "Please recompile or reinstall Paddle with GPU support."));
#endif
  } else if (platform::is_xpu_place(place)) {
#ifdef PADDLE_WITH_XPU
    // TODO(liuyuhui): Consider xpu stream later
    return Alloc(place, size);
#else
    PADDLE_THROW(platform::errors::PermissionDenied(
        "Paddle can't use XPU device since it's not compiled with XPU. "
        "Please recompile or reinstall Paddle with XPU support."));
#endif
  } else if (platform::is_mlu_place(place)) {
#ifdef PADDLE_WITH_MLU
    auto* default_dev_ctx = static_cast<platform::MLUDeviceContext*>(
        platform::DeviceContextPool::Instance().Get(place));
    auto& desired_dev_ctx =
        static_cast<const platform::MLUDeviceContext&>(dev_ctx);
    if (default_dev_ctx->stream() == desired_dev_ctx.stream()) {
      return Alloc(place, size);
    } else {
      return allocation::MLUDeviceContextAllocatorPool::Instance().Alloc(
          desired_dev_ctx, size);
    }
#else
    PADDLE_THROW(platform::errors::PermissionDenied(
        "Paddle can't use MLU device since it's not compiled with MLU. "
        "Please recompile or reinstall Paddle with MLU support."));
#endif
  } else {
    return Alloc(place, size);
  }
}
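
// Illustrative sketch (comment only): how a caller might use the
// stream-aware overload above. `my_ctx` is a hypothetical CUDADeviceContext
// bound to a non-default stream; such requests are routed through
// CUDADeviceContextAllocatorPool so the allocation is tied to that stream.
//
//   const platform::CUDADeviceContext& my_ctx = ...;  // hypothetical
//   memory::AllocationPtr buf = memory::Alloc(my_ctx, /*size=*/4096);
//   void* data = buf->ptr();  // released when `buf` goes out of scope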

}  // namespace memory
}  // namespace paddle

namespace paddle {
namespace platform {

#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
bool allow_tf32_cublas = true;
void SetAllowTF32Cublas(bool active) { allow_tf32_cublas = active; }
bool AllowTF32Cublas() { return allow_tf32_cublas; }

bool allow_tf32_cudnn = true;
void SetAllowTF32Cudnn(bool active) { allow_tf32_cudnn = active; }
bool AllowTF32Cudnn() { return allow_tf32_cudnn; }
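
// Usage sketch (assumption: these process-global flags are flipped before
// launching the kernels they should affect):
//
//   platform::SetAllowTF32Cublas(false);  // force full FP32 in cuBLAS
//   CHECK(!platform::AllowTF32Cublas());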
#endif  // PADDLE_WITH_CUDA || PADDLE_WITH_HIP

DeviceType Place2DeviceType(const platform::Place& place) {
  if (platform::is_cpu_place(place)) {
    return platform::DeviceType::CPU;
  } else if (platform::is_gpu_place(place)) {
    return platform::DeviceType::CUDA;
  } else if (platform::is_xpu_place(place)) {
    return platform::DeviceType::XPU;
  } else if (platform::is_mlu_place(place)) {
    return platform::DeviceType::MLU;
  } else {
    PADDLE_THROW(platform::errors::Unavailable(
        "Unsupported place %s to convert into platform::DeviceType.", place));
  }
}

DeviceContextPool* DeviceContextPool::pool = nullptr;

platform::DeviceContext* DeviceContextPool::Get(const platform::Place& place) {
  VLOG(6) << "DeviceContextPool Get: " << place;
  auto it = device_contexts_.find(place);
  if (it == device_contexts_.end()) {
    PADDLE_THROW(platform::errors::Unimplemented(
        "Place %s is not supported. Please check that your Paddle was "
        "compiled with the WITH_GPU, WITH_XPU, WITH_IPU, WITH_MLU or "
        "WITH_ASCEND_CL option, and that your train process sets the "
        "correct device id if you use Executor.",
        place));
  }
  return it->second.get().get();
}
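
// Typical lookup, assuming the pool has already been initialized during
// framework startup (sketch):
//
//   auto& pool = platform::DeviceContextPool::Instance();
//   auto* dev_ctx = pool.Get(platform::CPUPlace());
//   dev_ctx->Wait();  // block until the device finishes pending work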

template <typename DevCtx>
inline void EmplaceDeviceContext(
    std::map<Place, std::shared_future<std::unique_ptr<DeviceContext>>>*
        map_ptr,
    platform::Place p) {
  using PtrType = std::unique_ptr<DeviceContext>;
  map_ptr->emplace(p, std::async(std::launch::deferred, [=] {
                     // lazy evaluation. i.e., only create device context at
                     // first `Get`
                     return PtrType(new DevCtx(p));
                   }));
}
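
// A minimal sketch of the deferred-initialization idiom used above, with a
// hypothetical `MyCtx` type: std::launch::deferred postpones the factory
// until the shared_future is first waited on, which here happens on the
// first DeviceContextPool::Get() for that place.
//
//   std::shared_future<std::unique_ptr<MyCtx>> ctx_future = std::async(
//       std::launch::deferred,
//       [] { return std::unique_ptr<MyCtx>(new MyCtx); });
//   MyCtx* ctx = ctx_future.get().get();  // MyCtx is constructed here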

DeviceContextPool::DeviceContextPool(
    const std::vector<platform::Place>& places) {
  PADDLE_ENFORCE_GT(
      places.size(), 0,
      platform::errors::InvalidArgument("The number of platform places should "
                                        "be larger than 0. But received %d.",
                                        places.size()));
  std::set<Place> set;
  for (auto& p : places) {
    set.insert(p);
  }
  for (auto& p : set) {
    if (platform::is_cpu_place(p)) {
#ifdef PADDLE_WITH_MKLDNN
      EmplaceDeviceContext<MKLDNNDeviceContext>(&device_contexts_, p);
#else
      EmplaceDeviceContext<CPUDeviceContext>(&device_contexts_, p);
#endif
    } else if (platform::is_gpu_place(p)) {
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
      EmplaceDeviceContext<CUDADeviceContext>(&device_contexts_, p);
#else
      PADDLE_THROW(
          platform::errors::Unimplemented("CUDAPlace is not supported. Please "
                                          "re-compile with WITH_GPU option."));
#endif
    } else if (platform::is_cuda_pinned_place(p)) {
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
      EmplaceDeviceContext<CUDAPinnedDeviceContext>(&device_contexts_, p);
#else
      PADDLE_THROW(platform::errors::Unimplemented(
          "CUDAPinnedPlace is not supported. Please re-compile with WITH_GPU "
          "option."));
#endif
    } else if (platform::is_xpu_place(p)) {
#ifdef PADDLE_WITH_XPU
      EmplaceDeviceContext<XPUDeviceContext>(&device_contexts_, p);
#else
      PADDLE_THROW(
          platform::errors::Unimplemented("XPUPlace is not supported. Please "
                                          "re-compile with WITH_XPU option."));
#endif
    } else if (platform::is_mlu_place(p)) {
#ifdef PADDLE_WITH_MLU
      EmplaceDeviceContext<MLUDeviceContext>(&device_contexts_, p);
#else
      PADDLE_THROW(
          platform::errors::Unimplemented("MLUPlace is not supported. Please "
                                          "re-compile with WITH_MLU option."));
#endif
    } else if (platform::is_ipu_place(p)) {
#ifdef PADDLE_WITH_IPU
      EmplaceDeviceContext<IPUDeviceContext>(&device_contexts_, p);
#else
      PADDLE_THROW(
          platform::errors::Unimplemented("IPUPlace is not supported. Please "
                                          "re-compile with WITH_IPU option."));
#endif
    } else if (platform::is_npu_place(p)) {
#ifdef PADDLE_WITH_ASCEND_CL
      EmplaceDeviceContext<NPUDeviceContext>(&device_contexts_, p);
#else
      PADDLE_THROW(platform::errors::Unimplemented(
          "NPUPlace is not supported. Please "
          "re-compile with WITH_ASCEND_CL option."));
#endif
    } else if (platform::is_npu_pinned_place(p)) {
#ifdef PADDLE_WITH_ASCEND_CL
      EmplaceDeviceContext<NPUPinnedDeviceContext>(&device_contexts_, p);
#else
      PADDLE_THROW(platform::errors::Unimplemented(
          "NPUPinnedPlace is not supported. Please re-compile with "
          "WITH_ASCEND_CL option."));
#endif
    }
  }
}
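
// Construction sketch, e.g. during framework initialization; the place list
// is a hypothetical example and Init is assumed to be the static helper
// declared with this class in device_context.h:
//
//   std::vector<platform::Place> places = {platform::CPUPlace()};
//   platform::DeviceContextPool::Init(places);  // builds the singleton pool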

CPUDeviceContext::CPUDeviceContext() : pten::CPUContext() {}

CPUDeviceContext::CPUDeviceContext(CPUPlace place) : pten::CPUContext() {}

#ifdef PADDLE_WITH_IPU
IPUDeviceContext::IPUDeviceContext(IPUPlace place) : place_(place) {
  int id = place.GetDeviceId();
  std::shared_ptr<platform::ipu::IpuBackend> ipu_backend =
      platform::ipu::IpuBackend::GetInstance();
  device_ = ipu_backend->GetDevice(id);
}

Place IPUDeviceContext::GetPlace() const { return place_; }
void IPUDeviceContext::Wait() const {
  /*! \brief Wait for all operations in the stream to complete. */
}

IPUDeviceContext::~IPUDeviceContext() {}

#endif
#ifdef PADDLE_WITH_XPU
XPUDeviceContext::XPUDeviceContext() : pten::XPUContext() {}

XPUDeviceContext::~XPUDeviceContext() {}

XPUDeviceContext::XPUDeviceContext(XPUPlace place) : pten::XPUContext(place) {
  LOG_FIRST_N(WARNING, 1) << "Please NOTE: xpu device: "
                          << static_cast<int>(place.device);
}
#endif

#ifdef PADDLE_WITH_ASCEND_CL
NPUDeviceContext::NPUDeviceContext(NPUPlace place) : place_(place) {
  NPUDeviceGuard guard(place_.device);
  // PADDLE_ENFORCE_NPU_SUCCESS(aclrtCreateContext(&context_, place_.device));
  // NOTE(zhiqiu): Usually, no need to create context explicitly,
  // ACL creates a default context which contains 1 default stream
  // and 1 sync stream after aclrtSetDevice.
  platform::GetCurrentNPUContext(&context_);
  stream_.reset(new stream::NPUStream(place));
}

NPUDeviceContext::~NPUDeviceContext() {
  // NPUDeviceGuard guard(place_.device);
  // PADDLE_ENFORCE_NPU_SUCCESS(aclrtDestroyContext(context_));
}

void NPUDeviceContext::Wait() const {
  platform::RecordEvent record_event("NPUDeviceContext/wait");
  VLOG(4) << "NPU context(" << this << ")  Wait";
  stream_->Wait();
}

aclrtStream NPUDeviceContext::stream() const { return stream_->raw_stream(); }

Place NPUDeviceContext::GetPlace() const { return place_; }

aclrtContext NPUDeviceContext::context() const { return context_; }

NPUPinnedDeviceContext::NPUPinnedDeviceContext() {
  eigen_device_.reset(new Eigen::DefaultDevice());
}

NPUPinnedDeviceContext::NPUPinnedDeviceContext(NPUPinnedPlace place)
    : place_(place) {
  eigen_device_.reset(new Eigen::DefaultDevice());
}

Eigen::DefaultDevice* NPUPinnedDeviceContext::eigen_device() const {
  return eigen_device_.get();
}

Place NPUPinnedDeviceContext::GetPlace() const { return place_; }

#endif

#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
class EigenCudaStreamDevice : public Eigen::StreamInterface {
 public:
  EigenCudaStreamDevice() : scratch_(nullptr), semaphore_(nullptr) {
    Eigen::initializeDeviceProp();
  }
  ~EigenCudaStreamDevice() override {}

  void Reinitialize(const gpuStream_t* cuda_stream, CUDAPlace place) {
    stream_ = cuda_stream;
    place_ = place;
    device_prop_ = &Eigen::m_deviceProperties[place.device];
  }

  const gpuStream_t& stream() const override { return *stream_; }

#ifdef PADDLE_WITH_HIP
  const hipDeviceProp_t& deviceProperties() const override {
#else
  const cudaDeviceProp& deviceProperties() const override {
#endif
    return *device_prop_;
  }

  void* allocate(size_t num_bytes) const override {
    if (UNLIKELY(num_bytes == 0)) {
      return nullptr;
    }
    auto buf = memory::Alloc(place_, num_bytes);
    VLOG(4) << "Eigen allocated at " << buf->ptr() << ", size " << buf->size()
            << ", requested " << num_bytes;
    void* retv = buf->ptr();
    {
      std::lock_guard<std::mutex> lock(mtx_);
      allocations_.emplace(retv, std::move(buf));
    }
    return retv;
  }

  void deallocate(void* buffer) const override {
    if (LIKELY(buffer)) {
      std::lock_guard<std::mutex> lock(mtx_);
      allocations_.erase(buffer);
    }
  }

  void* scratchpad() const override {
    if (scratch_ == NULL) {
      scratch_ = allocate(Eigen::kGpuScratchSize + sizeof(unsigned int));
    }
    return scratch_;
  }

  unsigned int* semaphore() const override {
    if (semaphore_ == NULL) {
      char* scratch = static_cast<char*>(scratchpad()) + Eigen::kGpuScratchSize;
      semaphore_ = reinterpret_cast<unsigned int*>(scratch);
#ifdef PADDLE_WITH_HIP
      PADDLE_ENFORCE_GPU_SUCCESS(
          hipMemsetAsync(semaphore_, 0, sizeof(unsigned int), *stream_));
#else
      PADDLE_ENFORCE_GPU_SUCCESS(
          cudaMemsetAsync(semaphore_, 0, sizeof(unsigned int), *stream_));
#endif
    }
    return semaphore_;
  }

 private:
  CUDAPlace place_;
  const gpuStream_t* stream_;  // not owned
#ifdef PADDLE_WITH_HIP
  const hipDeviceProp_t* device_prop_;  // not owned
#else
  const cudaDeviceProp* device_prop_;  // not owned
#endif
  mutable void* scratch_;
  mutable unsigned int* semaphore_;
  mutable std::mutex mtx_;  // to protect allocations_
  mutable std::unordered_map<void*, memory::AllocationPtr> allocations_;
};

void CudnnWorkspaceHandle::ReallocWorkspace(size_t required_workspace_bytes) {
  if (required_workspace_bytes <= WorkspaceSize()) {
    return;
  }
  // reset allocation first before re-allocate to save memory
  allocation_.reset();
  allocation_ = memory::Alloc(device_context_, required_workspace_bytes);
}
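
// Sketch of the intended usage, assuming a CUDADeviceContext `dev_ctx`: the
// handle grows its buffer on demand and releases it when the handle goes out
// of scope, so cuDNN calls borrow workspace memory rather than own it.
//
//   auto workspace = dev_ctx.cudnn_workspace_handle();
//   workspace.RunFunc([&](void* ws) { /* launch a cuDNN kernel with ws */ },
//                     required_workspace_bytes);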

thread_local std::unordered_map<const CUDADeviceContext*,
                                std::shared_ptr<CUDAContext>>
    CUDADeviceContext::thread_ctx_;
thread_local std::mutex CUDADeviceContext::ctx_mtx_;

void CUDAContext::InitEigenContext() {
  eigen_stream_.reset(new EigenCudaStreamDevice());
  eigen_stream_->Reinitialize(&RawStream(), place_);
  eigen_device_.reset(new Eigen::GpuDevice(eigen_stream_.get()));
}

CUDAContext::CUDAContext(const CUDAPlace& place,
                         const stream::Priority& priority,
                         const stream::StreamFlag& flag) {
  place_ = place;
  CUDADeviceGuard guard(place_.device);
  stream_.reset(new stream::CUDAStream(place, priority, flag));
  InitEigenContext();
  InitCuBlasContext();
  InitCuDNNContext();
#ifndef PADDLE_WITH_HIP
  InitCuSparseContext();
  InitCuSolverContext();
#endif
}

void CUDAContext::SetStream(gpuStream_t stream) {
  if (stream_->raw_stream() != stream) {
    CUDADeviceGuard guard(place_.device);
    DestoryCuDNNContext();
    DestoryCuBlasContext();
#ifndef PADDLE_WITH_HIP
    DestoryCuSolverContext();
#endif

    stream_->SetStream(stream);

    InitEigenContext();
    InitCuBlasContext();
    InitCuDNNContext();
#ifndef PADDLE_WITH_HIP
    InitCuSolverContext();
#endif
  }
}

CUDAContext::~CUDAContext() {
  CUDADeviceGuard guard(place_.device);
  DestoryCuDNNContext();
  DestoryCuBlasContext();
#ifndef PADDLE_WITH_HIP
  DestoryCuSparseContext();
  DestoryCuSolverContext();
#endif
}

CUDADeviceContext::CUDADeviceContext(CUDAPlace place) : place_(place) {
  CUDADeviceGuard guard(place_.device);
  compute_capability_ = GetGPUComputeCapability(place_.device);
  multi_process_ = GetGPUMultiProcessors(place_.device);
  max_threads_per_mp_ = GetGPUMaxThreadsPerMultiProcessor(place_.device);
  max_grid_dim_size_ = GetGpuMaxGridDimSize(place_.device);
  max_threads_per_block_ = GetGPUMaxThreadsPerBlock(place_.device);

  driver_version_ = GetGPUDriverVersion(place_.device);
  runtime_version_ = GetGPURuntimeVersion(place_.device);

  LOG_FIRST_N(WARNING, 1) << "Please NOTE: device: "
                          << static_cast<int>(place_.device)
                          << ", GPU Compute Capability: "
                          << compute_capability_ / 10 << "."
                          << compute_capability_ % 10
                          << ", Driver API Version: " << driver_version_ / 1000
                          << "." << (driver_version_ % 100) / 10
                          << ", Runtime API Version: "
                          << runtime_version_ / 1000 << "."
                          << (runtime_version_ % 100) / 10;
#ifdef PADDLE_WITH_HIP
  size_t version_major, version_minor, version_patch;
  PADDLE_ENFORCE_GPU_SUCCESS(dynload::miopenGetVersion(
      &version_major, &version_minor, &version_patch));
  LOG_FIRST_N(WARNING, 1) << "device: " << static_cast<int>(place_.device)
                          << ", MIOpen Version: " << version_major << "."
                          << version_minor << "." << version_patch;
#else
  size_t cudnn_dso_ver = dynload::cudnnGetVersion();
  LOG_FIRST_N(WARNING, 1) << "device: " << static_cast<int>(place_.device)
                          << ", cuDNN Version: " << cudnn_dso_ver / 1000 << "."
                          << (cudnn_dso_ver % 1000) / 100 << ".";
#endif
  {
    // Check CUDA/CUDNN version compatibility
    auto local_cuda_version =
        (driver_version_ / 1000) * 10 + (driver_version_ % 100) / 10;
#ifdef PADDLE_WITH_HIP
    auto compile_cuda_version = (HIP_VERSION / 100) * 10 + (HIP_VERSION % 10);
#else
    auto compile_cuda_version =
        (CUDA_VERSION / 1000) * 10 + (CUDA_VERSION % 100) / 10;
#endif
    if (local_cuda_version < compile_cuda_version) {
      LOG_FIRST_N(WARNING, 1)
          << "WARNING: device: " << static_cast<int>(place_.device)
          << ". The installed Paddle is compiled with CUDA "
          << compile_cuda_version / 10 << "." << compile_cuda_version % 10
          << ", but CUDA runtime version in your machine is "
          << local_cuda_version / 10 << "." << local_cuda_version % 10
          << ", which may cause serious incompatibility issues. "
          << "Please recompile or reinstall Paddle with compatible CUDA "
             "version.";
    }
  }
  default_ctx_.reset(new CUDAContext(place_));
}

CUDADeviceContext::~CUDADeviceContext() {
  SetDeviceId(place_.device);
#if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL)
  if (nccl_comm_) {
    PADDLE_ENFORCE_GPU_SUCCESS(dynload::ncclCommDestroy(nccl_comm_));
  }
#endif
}

Place CUDADeviceContext::GetPlace() const { return place_; }

void CUDADeviceContext::Wait() const { context()->Stream()->Wait(); }

int CUDADeviceContext::GetComputeCapability() const {
  return compute_capability_;
}

int CUDADeviceContext::GetMaxPhysicalThreadCount() const {
  return multi_process_ * max_threads_per_mp_;
}

int CUDADeviceContext::GetSMCount() const { return multi_process_; }

int CUDADeviceContext::GetMaxThreadsPerBlock() const {
  return max_threads_per_block_;
}

Eigen::GpuDevice* CUDADeviceContext::eigen_device() const {
  return context()->EigenDevice().get();
}

bool CUDADeviceContext::tensor_core_available() const {
  return context()->CublasTensorCoreHandle() != nullptr;
}

dim3 CUDADeviceContext::GetCUDAMaxGridDimSize() const {
  return max_grid_dim_size_;
}

#ifdef PADDLE_WITH_HIP
miopenHandle_t CUDADeviceContext::cudnn_handle() const {
#else
cudnnHandle_t CUDADeviceContext::cudnn_handle() const {
#endif
  return context()->CudnnHandle();
}

#ifdef PADDLE_WITH_HIP
rocblas_handle CUDADeviceContext::cublas_handle() const {
  return context()->CublasHandle()->GetCublasHandle();
}
#else
cublasHandle_t CUDADeviceContext::cublas_handle() const {
  return context()->CublasHandle()->GetCublasHandle();
}
cusparseHandle_t CUDADeviceContext::cusparse_handle() const {
  return context()->CusparseHandle()->GetCusparseHandle();
}
#endif

CudnnWorkspaceHandle CUDADeviceContext::cudnn_workspace_handle() const {
  return CudnnWorkspaceHandle(*this, &cudnn_handle_mtx_);
}

#ifndef PADDLE_WITH_HIP
cusolverDnHandle_t CUDADeviceContext::cusolver_dn_handle() const {
  return context()->CusolverDnHandle();
}
#endif

gpuStream_t CUDADeviceContext::stream() const { return context()->RawStream(); }

CUDAPinnedDeviceContext::CUDAPinnedDeviceContext() {
  eigen_device_.reset(new Eigen::DefaultDevice());
}

CUDAPinnedDeviceContext::CUDAPinnedDeviceContext(CUDAPinnedPlace place)
    : place_(place) {
  eigen_device_.reset(new Eigen::DefaultDevice());
}

Eigen::DefaultDevice* CUDAPinnedDeviceContext::eigen_device() const {
  return eigen_device_.get();
}

Place CUDAPinnedDeviceContext::GetPlace() const { return place_; }
#endif

#ifdef PADDLE_WITH_MKLDNN
MKLDNNDeviceContext::MKLDNNDeviceContext(CPUPlace place)
    : CPUDeviceContext(place), p_blobmap_() {
  p_blobmap_.reset(new BlobMap());
  p_exec_items_.reset(new ExecShape());
  p_mutex_.reset(new std::mutex());
}
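
// The cache manipulated below is a three-level map (layout sketch, matching
// the BlobMap/ShapeBlob/KeyBlob typedefs in device_context.h):
//   BlobMap:   session id         -> ShapeBlob
//   ShapeBlob: input-shape string -> KeyBlob
//   KeyBlob:   blob name          -> cached oneDNN object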

MKLDNNDeviceContextThreadLocals::Body::Body()
    : cur_engine(dnnl::engine::kind::cpu, 0), cur_stream(cur_engine) {
  cur_mkldnn_session_id = kMKLDNNSessionID_Default;
  cur_input_shape_str = "";
  cur_input_shape_cache_capacity = 1;
  cur_paddle_data_layout = paddle::framework::DataLayout::kNCHW;
}

// When a thread finishes, we clear the oneDNN cache. This is needed when
// one executor is used by many threads, e.g. test_analyzer_detect. Thread
// ID is not part of the caching key (for the naive executor), so we need to
// clear the cache when one thread finishes and another is about to start
// inference.
// TODO(jczaja): Ideally it would be good to clear only the part of the
// cache related to the thread that is being terminated.
MKLDNNDeviceContextThreadLocals::Body::~Body() {
  auto cpu_place = paddle::platform::CPUPlace();
  platform::DeviceContextPool& pool = platform::DeviceContextPool::Instance();
  platform::MKLDNNDeviceContext* dev_ctx =
      (platform::MKLDNNDeviceContext*)pool.Get(cpu_place);
  dev_ctx->ResetBlobMap(exec_ptr_);
}

void MKLDNNDeviceContextThreadLocals::Body::set_cur_mkldnn_session_id(
    size_t sid) {
  cur_mkldnn_session_id = sid;
}
size_t MKLDNNDeviceContextThreadLocals::Body::get_cur_mkldnn_session_id(void) {
  return cur_mkldnn_session_id;
}

void MKLDNNDeviceContextThreadLocals::Body::set_cur_input_shape_str(
    std::string input_shape_str) {
  cur_input_shape_str = input_shape_str;
}
void MKLDNNDeviceContextThreadLocals::Body::set_cur_input_shape_cache_capacity(
    int input_shape_cache_capacity) {
  cur_input_shape_cache_capacity = input_shape_cache_capacity;
}

void MKLDNNDeviceContextThreadLocals::Body::set_cur_paddle_data_layout(
    framework::DataLayout dl) {
  cur_paddle_data_layout = dl;
}

framework::DataLayout
MKLDNNDeviceContextThreadLocals::Body::get_cur_paddle_data_layout(void) {
  return cur_paddle_data_layout;
}

void MKLDNNDeviceContextThreadLocals::Body::log_lib_version(void) {
  if (!said_once) {
    said_once = true;
    auto dv = dnnl::version();
    LOG(INFO) << "oneDNN v" << dv->major << "." << dv->minor << "."
              << dv->patch;
  }
}

const dnnl::engine& MKLDNNDeviceContextThreadLocals::Body::get_engine(void) {
  return cur_engine;
}

dnnl::stream& MKLDNNDeviceContextThreadLocals::Body::get_stream(void) {
  return cur_stream;
}

void MKLDNNDeviceContext::ResetBlobMap(void* ptr) {
  std::lock_guard<decltype(*p_mutex_)> lock(*p_mutex_);
  if (!block_next_cache_clearing_) {
    VLOG(3) << "Clearing DNNL cache.";
    // If no specific executor pointer is given, clear everything;
    // for a given executor pointer, clear only the objects allocated
    // when using that executor.
    if (ptr == nullptr) {
      p_blobmap_->clear();
    } else {
      // Iterate through all shapes and, for each shape and the active
      // executor, release all entries of this executor.
      for (auto& s : *p_exec_items_) {
        for (auto& v : (*s.second)[ptr]) {
          (v.first)->erase(v.second);
        }
        s.second->erase(ptr);
      }
    }
  } else {
    VLOG(3) << "Prevented Clearing DNNL cache.";
    block_next_cache_clearing_ = false;
  }
}

void MKLDNNDeviceContext::RemoveShapeEntriesWithExecutor(void) const {
  p_exec_items_->erase(p_exec_items_->begin());
}

void MKLDNNDeviceContext::LinkEntryWithExecutor(BlobPtr_t<KeyBlob> pblob,
                                                KeyBlob::iterator it) const {
  // Take the current input shape and the current executor address from TLS,
  // and for this executor's items add the entry defined by the arguments.
  auto key_it = p_exec_items_
                    ->insert(std::make_pair(tls().cur_input_shape_str,
                                            std::make_shared<ExecMap>()))
                    .first;
  (*key_it->second)[tls().get_curr_exec()].push_back(std::make_pair(pblob, it));

  VLOG(3) << "LinkEntryWithExecutor, shapes: " << p_exec_items_->size()
          << " curr exec size: "
          << (*key_it->second)[tls().get_curr_exec()].size() << "\n";
}

void MKLDNNDeviceContext::BlockNextCacheClearing() {
  std::lock_guard<decltype(*p_mutex_)> lock(*p_mutex_);
  VLOG(3) << "Next DNNL cache clearing has been blocked.";
  block_next_cache_clearing_ = true;
}

size_t MKLDNNDeviceContext::GetShapeBlobSize() const {
  std::lock_guard<decltype(*p_mutex_)> lock(*p_mutex_);
  BlobMap* pMap = p_blobmap_.get();
  auto map_it = pMap->find(tls().cur_mkldnn_session_id);
  if (map_it == pMap->end()) {
    PADDLE_THROW(platform::errors::NotFound(
        "MKLDNNDeviceContext doesn't find cur_mkldnn_session_id: %d.",
        tls().cur_mkldnn_session_id));
  }
  return map_it->second->size();
}

void MKLDNNDeviceContext::SetBlob(const std::string& name,
                                  BlobPtr_t<void> data) const {
  BlobMap* pMap = p_blobmap_.get();
  BlobPtr_t<ShapeBlob> sBlob = nullptr;
  BlobPtr_t<KeyBlob> pBlob = nullptr;

  int sid = tls().get_cur_mkldnn_session_id();

  std::lock_guard<decltype(*p_mutex_)> lock(*p_mutex_);

  // Find ShapeBlob for current mkldnn session id.
  auto map_it = pMap->find(sid);

  if (map_it == pMap->end()) {
    // 1st time to set blob in current thread
    sBlob = std::make_shared<ShapeBlob>();
    (*pMap)[sid] = sBlob;
    VLOG(2) << "SetBlob: sid=" << sid << ", add new sid\n";
  } else {
    sBlob = map_it->second;
  }

  // Find KeyBlob for current input shape
  auto key_it = sBlob->find(tls().cur_input_shape_str);

  if (key_it == sBlob->end()) {
    // In cache clearing mode, cur_input_shape_cache_capacity defines
    // max pblob capacity
    if ((static_cast<size_t>(sid) ==
         MKLDNNDeviceContextThreadLocals::kMKLDNNSessionID_CacheClearing) &&
        sBlob->size() &&
        (sBlob->size() >=
         static_cast<size_t>(tls().cur_input_shape_cache_capacity))) {
      VLOG(2) << "sid=" << sid
              << ", remove all blobs of shape: " << sBlob->begin()->first;
      sBlob->erase(sBlob->begin()->first);
      RemoveShapeEntriesWithExecutor();
    }
    pBlob = std::make_shared<KeyBlob>();
    (*sBlob)[tls().cur_input_shape_str] = pBlob;
  } else {
    pBlob = key_it->second;
  }

  // Find Blob via name
  auto blob_it = pBlob->find(name);
  if (blob_it == pBlob->end()) {
    auto el =
        pBlob->insert(std::make_pair(name, data));  //  (*pBlob)[name] = data;
    // Register the new element in the per-executor map so it can easily be
    // erased when the executor terminates.
    LinkEntryWithExecutor(pBlob, el.first);
  } else {
    blob_it->second = data;  // set data to existing blob
  }
  VLOG(2) << "SetBlob: sid=" << sid << ", add blob=" << name << "\n";
  // lock will be automatically released when out of scope
  return;
}
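
// Round-trip sketch, assuming `dev_ctx` points at the MKLDNNDeviceContext
// for CPUPlace and `prim` is a std::shared_ptr<void> holding some oneDNN
// object (both hypothetical names):
//
//   dev_ctx->SetBlob("conv_fwd_pd", prim);
//   auto hit = dev_ctx->GetBlob("conv_fwd_pd");  // nullptr on cache miss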

unsigned int MKLDNNDeviceContext::GetCachedObjectsNumber(void) const {
  unsigned int num_entries = 0;
  for (auto const& l3 : *p_blobmap_) {
    for (auto const& l2 : *(l3.second)) {
      num_entries += (l2.second)->size();
    }
  }
  return num_entries;
}

MKLDNNDeviceContext::BlobPtr_t<void> MKLDNNDeviceContext::GetBlob(
    const std::string& name) const {
  BlobMap* pMap = p_blobmap_.get();
  BlobPtr_t<ShapeBlob> sBlob = nullptr;
  BlobPtr_t<KeyBlob> pBlob = nullptr;

  int sid = tls().get_cur_mkldnn_session_id();

  std::lock_guard<decltype(*p_mutex_)> lock(*p_mutex_);

  // Find ShapeBlob for current mkldnn session id first
  auto map_it = pMap->find(sid);
  // (jczaja): After the first iteration of a model's execution we should
  // have all elements cached (mostly), so failures are unlikely (less
  // likely for dynamic shapes)
  if (unlikely(map_it == pMap->end())) {
    VLOG(2) << "GetBlob: sid=" << sid << ", miss sid\n";
    return nullptr;
  }
  sBlob = map_it->second;

  // Find KeyBlob for current input shape second
  auto sBlob_it = sBlob->find(tls().cur_input_shape_str);
  if (unlikely(sBlob_it == sBlob->end())) {
    VLOG(2) << "GetBlob: shape=" << tls().cur_input_shape_str
            << ", miss input_shape_str\n";
    return nullptr;
  }
  pBlob = sBlob_it->second;

  // Find Blob via name
  auto key_it = pBlob->find(name);

  if (unlikely(key_it == pBlob->end())) {
    VLOG(2) << "GetBlob sid=" << sid << ", miss blob=" << name << "\n";
    return nullptr;
  }

  VLOG(2) << "GetBlob sid=" << sid << ", get blob=" << name << "\n";
  // lock will be automatically released when out of scope
  return key_it->second;
}

#endif
}  // namespace platform
}  // namespace paddle