system_allocator_test.cc
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/memory/detail/system_allocator.h"

#include <memory>
#include <vector>

#include "gflags/gflags.h"
#include "gtest/gtest.h"
#include "paddle/fluid/memory/allocation/allocator.h"

DECLARE_bool(use_pinned_memory);

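// Allocates `size` bytes from the given system allocator, expecting a valid
// pointer for non-zero requests and nullptr for zero-sized ones, then checks
// that the block is actually returned to the allocator when it goes out of
// scope.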
void TestAllocator(paddle::memory::detail::SystemAllocator* a, size_t size) {
  bool freed = false;
  {
    size_t index;
    void* p = a->Alloc(&index, size);
    if (size > 0) {
      EXPECT_NE(p, nullptr);
    } else {
      EXPECT_EQ(p, nullptr);
    }

    int* i = static_cast<int*>(p);
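    // Wrap the raw pointer in a shared_ptr whose custom deleter records the
    // release and hands the block back to the allocator.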
    std::shared_ptr<int> ptr(i, [&](void* p) {
      freed = true;
      a->Free(p, size, index);
    });
  }
  EXPECT_TRUE(freed);
}

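// CPU allocation with page locking (pinned memory) disabled.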
TEST(CPUAllocator, NoLockMem) {
  FLAGS_use_pinned_memory = false;
  paddle::memory::detail::CPUAllocator a;
  TestAllocator(&a, 2048);
  TestAllocator(&a, 0);
}

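// CPU allocation with page locking (pinned memory) enabled.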
TEST(CPUAllocator, LockMem) {
  FLAGS_use_pinned_memory = true;
  paddle::memory::detail::CPUAllocator a;
  TestAllocator(&a, 2048);
  TestAllocator(&a, 0);
}

#ifdef PADDLE_WITH_CUDA
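// The GPU-side allocators are only built and tested when CUDA is enabled.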
TEST(GPUAllocator, Alloc) {
  paddle::memory::detail::GPUAllocator a(0);
  TestAllocator(&a, 2048);
  TestAllocator(&a, 0);
}

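// Page-locked host memory allocated through CUDA.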
TEST(CUDAPinnedAllocator, Alloc) {
  paddle::memory::detail::CUDAPinnedAllocator a;
  TestAllocator(&a, 2048);
  TestAllocator(&a, 0);
}

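// An impossibly large request must throw BadAlloc and leave no pending CUDA
// error behind, so later tests start from a clean error state.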
TEST(GPUAllocator, AllocFailure) {
  paddle::memory::detail::GPUAllocator allocator(0);
  size_t index;
  size_t alloc_size = -1UL;  // deliberately far larger than any device can satisfy
  try {
    allocator.Alloc(&index, alloc_size);
    ASSERT_TRUE(false);
  } catch (paddle::memory::allocation::BadAlloc&) {
    PADDLE_ENFORCE_CUDA_SUCCESS(cudaGetLastError());
  }
}
#endif