/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/memory/detail/system_allocator.h"

#include <memory>

#include "gflags/gflags.h"
#include "gtest/gtest.h"
#include "paddle/fluid/memory/allocation/allocator.h"
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
#include "paddle/fluid/platform/device/gpu/gpu_info.h"
#endif

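// Declared here, defined in the system allocator implementation; when true,
// CPUAllocator is expected to pin (mlock) the CPU memory it hands out.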
DECLARE_bool(use_pinned_memory);

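// Allocates `size` bytes from the given allocator, checks the nullptr
// contract, and verifies that Free() runs exactly once via a shared_ptr
// custom deleter.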
void TestAllocator(paddle::memory::detail::SystemAllocator* a, size_t size) {
  bool freed = false;
  {
    size_t index;
    void* p = a->Alloc(&index, size);
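    // A positive request must return a valid pointer; a zero-byte request
    // must return nullptr.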
    if (size > 0) {
      EXPECT_NE(p, nullptr);
    } else {
      EXPECT_EQ(p, nullptr);
    }

    int* i = static_cast<int*>(p);
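    // Tie the allocation's lifetime to a shared_ptr whose custom deleter
    // returns the memory through the allocator and records that it ran.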
    std::shared_ptr<int> ptr(i, [&](void* p) {
      freed = true;
      a->Free(p, size, index);
    });
  }
  EXPECT_TRUE(freed);
}

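// Plain CPU allocations: memory is not locked into physical RAM.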
TEST(CPUAllocator, NoLockMem) {
  FLAGS_use_pinned_memory = false;
  paddle::memory::detail::CPUAllocator a;
  TestAllocator(&a, 2048);
  TestAllocator(&a, 0);
}

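// Pinned CPU allocations: the allocator additionally locks the pages into
// RAM (e.g. via mlock) so they cannot be swapped out.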
TEST(CPUAllocator, LockMem) {
  FLAGS_use_pinned_memory = true;
  paddle::memory::detail::CPUAllocator a;
  TestAllocator(&a, 2048);
  TestAllocator(&a, 0);
}

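// The tests below compile only for CUDA or ROCm builds.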
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
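// Allocations from device memory on GPU 0.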
TEST(GPUAllocator, Alloc) {
  paddle::memory::detail::GPUAllocator a(0);
  TestAllocator(&a, 2048);
  TestAllocator(&a, 0);
}

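// Page-locked host memory, usable for fast host-device transfers.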
TEST(CUDAPinnedAllocator, Alloc) {
  paddle::memory::detail::CUDAPinnedAllocator a;
  TestAllocator(&a, 2048);
  TestAllocator(&a, 0);
}

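// An impossibly large allocation must fail with BadAlloc rather than crash.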
TEST(GPUAllocator, AllocFailure) {
  paddle::memory::detail::GPUAllocator allocator(0);
  size_t index;
  size_t alloc_size = (static_cast<size_t>(1) << 40);  // 1 TiB, assumed to exceed available device memory
  try {
    allocator.Alloc(&index, alloc_size);
    ASSERT_TRUE(false);
  } catch (paddle::memory::allocation::BadAlloc&) {
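    // The allocator should have consumed the device-side error state, so
    // querying the last GPU error must now report success.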
    PADDLE_ENFORCE_GPU_SUCCESS(paddle::platform::GpuGetLastError());
  }
}
#endif

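// Ascend NPU builds only.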
#ifdef PADDLE_WITH_ASCEND_CL
TEST(NPUAllocator, Alloc) {
  paddle::memory::detail::NPUAllocator a(0);
  TestAllocator(&a, 1 << 20);
  TestAllocator(&a, 1);
}
#endif