/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/platform/device_context.h"

#include <vector>

#include "glog/logging.h"
#include "gtest/gtest.h"
#include "paddle/fluid/memory/allocation/allocator_facade.h"

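// Checks that a CUDADeviceContext can be constructed and wired up with
// allocators for every visible GPU, and that it exposes a valid Eigen device.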
TEST(Device, Init) {
  using paddle::platform::DeviceContext;
  using paddle::platform::CUDADeviceContext;
  using paddle::platform::CUDAPlace;

  int count = paddle::platform::GetGPUDeviceCount();
  for (int i = 0; i < count; i++) {
    CUDADeviceContext* device_context = new CUDADeviceContext(CUDAPlace(i));
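    // The context is not fully usable until device, host, and zero-size
    // allocators are attached and PartialInitWithAllocator() has run.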
    device_context->SetAllocator(
        paddle::memory::allocation::AllocatorFacade::Instance()
            .GetAllocator(CUDAPlace(i), device_context->stream())
            .get());
    device_context->SetHostAllocator(
        paddle::memory::allocation::AllocatorFacade::Instance()
            .GetAllocator(paddle::platform::CPUPlace())
            .get());
    device_context->SetZeroAllocator(
        paddle::memory::allocation::AllocatorFacade::Instance()
            .GetZeroAllocator(CUDAPlace(i))
            .get());
    device_context->PartialInitWithAllocator();

    Eigen::GpuDevice* gpu_device = device_context->eigen_device();
    ASSERT_NE(nullptr, gpu_device);
    delete device_context;
  }
}

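// Checks that a fully initialized CUDADeviceContext also provides the
// DNN and BLAS library handles used by device kernels.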
TEST(Device, CUDADeviceContext) {
  using paddle::platform::CUDADeviceContext;
  using paddle::platform::CUDAPlace;

  int count = paddle::platform::GetGPUDeviceCount();
  for (int i = 0; i < count; i++) {
    CUDADeviceContext* device_context = new CUDADeviceContext(CUDAPlace(i));
    device_context->SetAllocator(
        paddle::memory::allocation::AllocatorFacade::Instance()
            .GetAllocator(CUDAPlace(i), device_context->stream())
            .get());
    device_context->SetHostAllocator(
        paddle::memory::allocation::AllocatorFacade::Instance()
            .GetAllocator(paddle::platform::CPUPlace())
            .get());
    device_context->SetZeroAllocator(
        paddle::memory::allocation::AllocatorFacade::Instance()
            .GetZeroAllocator(CUDAPlace(i))
            .get());
    device_context->PartialInitWithAllocator();
    Eigen::GpuDevice* gpu_device = device_context->eigen_device();
    ASSERT_NE(nullptr, gpu_device);
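    // The DNN and BLAS handles are owned by the context; their concrete types
    // depend on whether the build targets ROCm (HIP) or CUDA.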
#ifdef PADDLE_WITH_HIP
    miopenHandle_t cudnn_handle = device_context->cudnn_handle();
#else
    cudnnHandle_t cudnn_handle = device_context->cudnn_handle();
#endif
    ASSERT_NE(nullptr, cudnn_handle);
#ifdef PADDLE_WITH_HIP
    rocblas_handle cublas_handle = device_context->cublas_handle();
#else
    cublasHandle_t cublas_handle = device_context->cublas_handle();
#endif
    ASSERT_NE(nullptr, cublas_handle);
    delete device_context;
  }
}

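// Checks that DeviceContextPool caches one context per place and hands back
// the same instance on repeated lookups.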
TEST(Device, DeviceContextPool) {
  using paddle::platform::DeviceContextPool;
  using paddle::platform::CUDADeviceContext;
  using paddle::platform::Place;
  using paddle::platform::CPUPlace;
  using paddle::platform::CUDAPlace;

  DeviceContextPool& pool = DeviceContextPool::Instance();
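  // Repeated lookups of the same CPUPlace must return the cached context.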
  auto cpu_dev_ctx1 = pool.Get(CPUPlace());
  auto cpu_dev_ctx2 = pool.Get(CPUPlace());
  ASSERT_EQ(cpu_dev_ctx2, cpu_dev_ctx1);

  std::vector<Place> gpu_places;
  int count = paddle::platform::GetGPUDeviceCount();
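  // Every visible GPU place should resolve to a non-null device context.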
  for (int i = 0; i < count; ++i) {
    auto dev_ctx = pool.Get(CUDAPlace(i));
    ASSERT_NE(dev_ctx, nullptr);
  }
}