/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <vector>

#include "glog/logging.h"
#include "gtest/gtest.h"
#include "paddle/fluid/memory/allocation/allocator_facade.h"
#include "paddle/fluid/platform/device_context.h"

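// Checks that a phi::GPUContext can be constructed for every visible GPU:
// the test wires up the required allocators, finishes initialization, and
// verifies that the underlying Eigen GPU device exists.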
TEST(Device, Init) {
  using paddle::platform::CUDAPlace;
  using paddle::platform::DeviceContext;
  using phi::GPUContext;

  int count = paddle::platform::GetGPUDeviceCount();
  for (int i = 0; i < count; i++) {
    phi::GPUContext* device_context = new phi::GPUContext(CUDAPlace(i));
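    // Attach the device, host, zero, and pinned allocators before completing
    // the context initialization below.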
    device_context->SetAllocator(
        paddle::memory::allocation::AllocatorFacade::Instance()
            .GetAllocator(CUDAPlace(i), device_context->stream())
            .get());
    device_context->SetHostAllocator(
        paddle::memory::allocation::AllocatorFacade::Instance()
            .GetAllocator(paddle::platform::CPUPlace())
            .get());
    device_context->SetZeroAllocator(
        paddle::memory::allocation::AllocatorFacade::Instance()
            .GetZeroAllocator(CUDAPlace(i))
            .get());
    device_context->SetPinnedAllocator(
        paddle::memory::allocation::AllocatorFacade::Instance()
            .GetAllocator(paddle::platform::CUDAPinnedPlace())
            .get());
    device_context->PartialInitWithAllocator();

    Eigen::GpuDevice* gpu_device = device_context->eigen_device();
    ASSERT_NE(nullptr, gpu_device);
    delete device_context;
  }
}

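// Checks that a fully initialized phi::GPUContext exposes non-null
// cuDNN and cuBLAS handles (MIOpen and rocBLAS when built with HIP).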
TEST(Device, GPUContext) {
  using paddle::platform::CUDAPlace;
  using phi::GPUContext;

  int count = paddle::platform::GetGPUDeviceCount();
  for (int i = 0; i < count; i++) {
    phi::GPUContext* device_context = new phi::GPUContext(CUDAPlace(i));
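    // Same allocator wiring as in the Init test above.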
    device_context->SetAllocator(
        paddle::memory::allocation::AllocatorFacade::Instance()
            .GetAllocator(CUDAPlace(i), device_context->stream())
            .get());
    device_context->SetHostAllocator(
        paddle::memory::allocation::AllocatorFacade::Instance()
            .GetAllocator(paddle::platform::CPUPlace())
            .get());
    device_context->SetZeroAllocator(
        paddle::memory::allocation::AllocatorFacade::Instance()
            .GetZeroAllocator(CUDAPlace(i))
            .get());
    device_context->SetPinnedAllocator(
        paddle::memory::allocation::AllocatorFacade::Instance()
            .GetAllocator(paddle::platform::CUDAPinnedPlace())
            .get());
    device_context->PartialInitWithAllocator();
    Eigen::GpuDevice* gpu_device = device_context->eigen_device();
    ASSERT_NE(nullptr, gpu_device);
#ifdef PADDLE_WITH_HIP
    miopenHandle_t cudnn_handle = device_context->cudnn_handle();
#else
    cudnnHandle_t cudnn_handle = device_context->cudnn_handle();
#endif
    ASSERT_NE(nullptr, cudnn_handle);
#ifdef PADDLE_WITH_HIP
    rocblas_handle cublas_handle = device_context->cublas_handle();
#else
    cublasHandle_t cublas_handle = device_context->cublas_handle();
#endif
    ASSERT_NE(nullptr, cublas_handle);
    delete device_context;
  }
}

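// Checks that DeviceContextPool returns the same context instance for
// repeated requests of the same CPU place, and a non-null context for
// every GPU place.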
TEST(Device, DeviceContextPool) {
  using paddle::platform::CPUPlace;
  using paddle::platform::CUDAPlace;
  using paddle::platform::DeviceContextPool;
  using paddle::platform::Place;
  using phi::GPUContext;

  DeviceContextPool& pool = DeviceContextPool::Instance();
  auto cpu_dev_ctx1 = pool.Get(CPUPlace());
  auto cpu_dev_ctx2 = pool.Get(CPUPlace());
  ASSERT_EQ(cpu_dev_ctx2, cpu_dev_ctx1);

  std::vector<Place> gpu_places;
  int count = paddle::platform::GetGPUDeviceCount();
  for (int i = 0; i < count; ++i) {
    auto dev_ctx = pool.Get(CUDAPlace(i));
    ASSERT_NE(dev_ctx, nullptr);
  }
}