/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/memory/detail/system_allocator.h"

#include <memory>

#include "gflags/gflags.h"
#include "gtest/gtest.h"
#include "paddle/fluid/memory/allocation/allocator.h"

#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
#include "paddle/fluid/platform/device/gpu/gpu_info.h"
#endif

#ifdef PADDLE_WITH_MLU
#include "paddle/fluid/platform/device/mlu/enforce.h"
#endif

DECLARE_bool(use_pinned_memory);

// Smoke-tests one SystemAllocator implementation: Alloc(size) must yield a
// non-null pointer for size > 0 and a null pointer for size == 0, and the
// matching Free must run exactly once when the owning handle leaves scope.
void TestAllocator(paddle::memory::detail::SystemAllocator* a, size_t size) {
  bool release_seen = false;
  {
    size_t index;
    void* raw = a->Alloc(&index, size);
    // Zero-byte requests are expected to produce a null pointer.
    if (size == 0) {
      EXPECT_EQ(raw, nullptr);
    } else {
      EXPECT_NE(raw, nullptr);
    }

    // Hand the allocation to a shared_ptr whose custom deleter records the
    // release and returns the memory to the allocator under test. Note that
    // a deleter passed to shared_ptr runs even when the stored pointer is
    // null, so the size == 0 path is covered too.
    auto deleter = [&](void* mem) {
      release_seen = true;
      a->Free(mem, size, index);
    };
    std::shared_ptr<int> guard(static_cast<int*>(raw), deleter);
  }
  // Leaving the scope above must have triggered the deleter exactly once.
  EXPECT_TRUE(release_seen);
}

// CPU allocation without mlock()-pinned pages.
TEST(CPUAllocator, NoLockMem) {
  FLAGS_use_pinned_memory = false;
  paddle::memory::detail::CPUAllocator allocator;
  // Exercise both a non-trivial request and the zero-byte edge case.
  TestAllocator(&allocator, 2048);
  TestAllocator(&allocator, 0);
}
// CPU allocation with pinned (page-locked) memory enabled.
TEST(CPUAllocator, LockMem) {
  FLAGS_use_pinned_memory = true;
  paddle::memory::detail::CPUAllocator allocator;
  // Exercise both a non-trivial request and the zero-byte edge case.
  TestAllocator(&allocator, 2048);
  TestAllocator(&allocator, 0);
}

#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
// Device-memory allocation on GPU 0.
TEST(GPUAllocator, Alloc) {
  paddle::memory::detail::GPUAllocator allocator(/*gpu_id=*/0);
  // Exercise both a non-trivial request and the zero-byte edge case.
  TestAllocator(&allocator, 2048);
  TestAllocator(&allocator, 0);
}
// Page-locked host memory via cudaHostAlloc/hipHostMalloc.
TEST(CUDAPinnedAllocator, Alloc) {
  paddle::memory::detail::CUDAPinnedAllocator allocator;
  // Exercise both a non-trivial request and the zero-byte edge case.
  TestAllocator(&allocator, 2048);
  TestAllocator(&allocator, 0);
}
// An absurdly large request must raise BadAlloc rather than return a bogus
// pointer, and the sticky device error left by the failed allocation must be
// consumed so it does not poison later tests.
TEST(GPUAllocator, AllocFailure) {
  paddle::memory::detail::GPUAllocator allocator(0);
  size_t index;
  size_t alloc_size = (static_cast<size_t>(1) << 40);  // 1 TiB — guaranteed to fail
  try {
    allocator.Alloc(&index, alloc_size);
    // FAIL() documents the intent better than ASSERT_TRUE(false).
    FAIL() << "Alloc of " << alloc_size << " bytes unexpectedly succeeded";
  } catch (const paddle::memory::allocation::BadAlloc&) {  // catch by const&
    // Clear the pending cudaErrorMemoryAllocation/hipErrorOutOfMemory.
    PADDLE_ENFORCE_GPU_SUCCESS(paddle::platform::GpuGetLastError());
  }
}
#endif

#ifdef PADDLE_WITH_ASCEND_CL
// Device-memory allocation on NPU 0. Note the sizes differ from the other
// suites: a 1 MiB request plus the minimal one-byte request.
TEST(NPUAllocator, Alloc) {
  paddle::memory::detail::NPUAllocator allocator(/*npu_id=*/0);
  TestAllocator(&allocator, 1 << 20);
  TestAllocator(&allocator, 1);
}
#endif
#ifdef PADDLE_WITH_MLU
// Device-memory allocation on MLU 0.
TEST(MLUAllocator, Alloc) {
  paddle::memory::detail::MLUAllocator allocator(/*mlu_id=*/0);
  // Exercise both a non-trivial request and the zero-byte edge case.
  TestAllocator(&allocator, 2048);
  TestAllocator(&allocator, 0);
}

// An absurdly large request must raise BadAlloc rather than return a bogus
// pointer, and the sticky device error left by the failed allocation must be
// consumed so it does not poison later tests.
TEST(MLUAllocator, AllocFailure) {
  paddle::memory::detail::MLUAllocator allocator(0);
  size_t index;
  size_t alloc_size = (static_cast<size_t>(1) << 40);  // 1 TiB — guaranteed to fail
  try {
    allocator.Alloc(&index, alloc_size);
    // FAIL() documents the intent better than ASSERT_TRUE(false).
    FAIL() << "Alloc of " << alloc_size << " bytes unexpectedly succeeded";
  } catch (const paddle::memory::allocation::BadAlloc&) {  // catch by const&
    // Clear the pending MLU runtime error from the failed allocation.
    PADDLE_ENFORCE_MLU_SUCCESS(cnrtGetLastError());
  }
}
#endif