Commit 0d6718fc, authored by Yu Yang

Pass compile

Parent d93b2d03
@@ -284,7 +284,7 @@ class Vector {
   bool IsInCPU() const { return flag_ & kDataInCPU; }

   mutable std::vector<T> cpu_;
-  mutable std::unique_ptr<memory::Allocation> gpu_;
+  mutable memory::AllocationPtr gpu_;
   mutable int flag_;

   mutable std::mutex mtx_;
...
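The change above (and the matching one to `workspace_` in `CudnnHolder` at the end of this diff) swaps a plain `std::unique_ptr<memory::Allocation>` for `memory::AllocationPtr`. A minimal sketch of what that alias is assumed to look like, based on how the tests below release allocations by simply dropping the handle (an illustration, not the literal Paddle definition):

```cpp
#include <memory>

namespace memory {

class Allocation;  // assumed: wraps ptr(), size() and the place of one allocation

// Assumed deleter: hands the Allocation back to the allocator that produced it,
// so `ptr.reset()` or `ptr = nullptr` replaces the old FreeUniquePtr() call.
struct AllocationDeleter {
  void operator()(Allocation* allocation) const;
};

using AllocationPtr = std::unique_ptr<Allocation, AllocationDeleter>;

}  // namespace memory
```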
@@ -32,13 +32,10 @@ class StubAllocation : public Allocation {
 TEST(BestFitAllocator, test_allocation) {
   StubAllocation stub(4UL * 1024 * 1024 * 1024);
   BestFitAllocator allocator(&stub);
-  {
-    auto allocation = allocator.Allocate(64);
-    allocator.FreeUniquePtr(std::move(allocation));
-  }
+  { auto allocation = allocator.Allocate(64, allocator.kDefault); }

   {
-    auto allocation = allocator.Allocate(80);
+    auto allocation = allocator.Allocate(80, allocator.kDefault);

     {
       auto best_fit_allocation =
@@ -50,19 +47,18 @@ TEST(BestFitAllocator, test_allocation) {
     ASSERT_EQ(allocation->ptr(), nullptr);
   }

-  auto allocation2 = allocator.Allocate(60);
-  auto allocation3 = allocator.Allocate(90);
-  allocator.FreeUniquePtr(std::move(allocation2));
-  allocation2 = allocator.Allocate(30);
+  auto allocation2 = allocator.Allocate(60, allocator.kDefault);
+  auto allocation3 = allocator.Allocate(90, allocator.kDefault);
+  allocation2.reset();
+  allocation2 = allocator.Allocate(30, allocator.kDefault);

   {
     auto best_fit_allocation =
         dynamic_cast<BestFitAllocation*>(allocation2.get());
     ASSERT_EQ(best_fit_allocation->ChunkIterator()->offset_, 80);
   }
-  allocator.FreeUniquePtr(std::move(allocation2));
-
-  allocation2 = allocator.Allocate(60);
+  allocation2.reset();
+  allocation2 = allocator.Allocate(60, allocator.kDefault);

   {
     auto best_fit_allocation =
@@ -70,23 +66,23 @@ TEST(BestFitAllocator, test_allocation) {
     ASSERT_EQ(best_fit_allocation->ChunkIterator()->offset_, 80);
   }

-  allocator.FreeUniquePtr(std::move(allocation));
-  allocator.FreeUniquePtr(std::move(allocation2));
-  allocation = allocator.Allocate(80 + 60);
+  allocation.reset();
+  allocation2.reset();
+  allocation = allocator.Allocate(80 + 60, allocator.kDefault);

   {
     auto best_fit_allocation =
         dynamic_cast<BestFitAllocation*>(allocation.get());
     ASSERT_EQ(best_fit_allocation->ChunkIterator()->offset_, 0);
   }

-  allocator.FreeUniquePtr(std::move(allocation));
-  allocation = allocator.Allocate(80);
-  allocation2 = allocator.Allocate(60);
-  allocator.FreeUniquePtr(std::move(allocation));
-  allocator.FreeUniquePtr(std::move(allocation3));
-  allocator.FreeUniquePtr(std::move(allocation2));
+  allocation.reset();
+  allocation = allocator.Allocate(80, allocator.kDefault);
+  allocation2 = allocator.Allocate(60, allocator.kDefault);
+  allocation = nullptr;
+  allocation2 = nullptr;
+  allocation3 = nullptr;

   ASSERT_EQ(allocator.NumFreeChunks(), 1U);
 }
@@ -94,7 +90,8 @@ TEST(BestFitAllocator, test_allocation) {

 TEST(BestFitAllocator, test_concurrent_cpu_allocation) {
   CPUAllocator allocator;
-  auto global_allocation = allocator.Allocate(256UL * 1024 * 1024);
+  auto global_allocation =
+      allocator.Allocate(256UL * 1024 * 1024, allocator.kDefault);

   std::unique_ptr<Allocator> best_fit_allocator(
       new BestFitAllocator(global_allocation.get()));
@@ -109,8 +106,8 @@ TEST(BestFitAllocator, test_concurrent_cpu_allocation) {
     for (size_t i = 0; i < 128; ++i) {
       size_t allocate_size = dist(engine);
-      auto allocation =
-          locked_allocator.Allocate(sizeof(size_t) * allocate_size);
+      auto allocation = locked_allocator.Allocate(
+          sizeof(size_t) * allocate_size, locked_allocator.kDefault);

       size_t* data = reinterpret_cast<size_t*>(allocation->ptr());
@@ -122,8 +119,6 @@ TEST(BestFitAllocator, test_concurrent_cpu_allocation) {
       for (size_t j = 0; j < allocate_size; ++j) {
         ASSERT_EQ(data[j], j);
       }
-
-      locked_allocator.FreeUniquePtr(std::move(allocation));
     }
   };
   {
@@ -135,8 +130,6 @@ TEST(BestFitAllocator, test_concurrent_cpu_allocation) {
       th.join();
     }
   }
-
-  allocator.FreeUniquePtr(std::move(global_allocation));
 }

 } // namespace allocation
...
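Every hunk in this test follows the same two-part substitution: `Allocate` now takes an explicit `Allocator::Attr` (the tests pass `kDefault`), and the explicit `FreeUniquePtr` calls disappear because dropping the returned handle frees the memory. A condensed before/after sketch, assuming the handle semantics described earlier:

```cpp
// Before (old API):
//   auto a = allocator.Allocate(64);
//   allocator.FreeUniquePtr(std::move(a));
//
// After (the idiom used in the updated test):
auto a = allocator.Allocate(64, allocator.kDefault);  // Attr is now explicit
a.reset();       // frees through the handle's deleter...
// a = nullptr;  // ...or equivalently by assigning nullptr
```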
@@ -35,7 +35,8 @@ struct ForEachFill {
 TEST(BestFitAllocator, concurrent_cuda) {
   CUDAAllocator allocator(platform::CUDAPlace(0));
   // 256 MB
-  auto cuda_allocation = allocator.Allocate(256U * 1024 * 1024);
+  auto cuda_allocation =
+      allocator.Allocate(256U * 1024 * 1024, allocator.kDefault);

   LockedAllocator concurrent_allocator(
       std::unique_ptr<Allocator>(new BestFitAllocator(cuda_allocation.get())));
@@ -49,8 +50,8 @@ TEST(BestFitAllocator, concurrent_cuda) {
     for (size_t i = 0; i < 128; ++i) {
       size_t allocate_size = dist(engine);
-      auto allocation =
-          concurrent_allocator.Allocate(sizeof(size_t) * allocate_size);
+      auto allocation = concurrent_allocator.Allocate(
+          sizeof(size_t) * allocate_size, concurrent_allocator.kDefault);

       size_t* data = reinterpret_cast<size_t*>(allocation->ptr());
@@ -66,8 +67,7 @@ TEST(BestFitAllocator, concurrent_cuda) {
       for (size_t j = 0; j < allocate_size; ++j) {
         ASSERT_EQ(buf[j], j);
       }
-
-      concurrent_allocator.FreeUniquePtr(std::move(allocation));
+      allocation = nullptr;
     }
   };
@@ -80,7 +80,7 @@ TEST(BestFitAllocator, concurrent_cuda) {
       th.join();
     }
   }
-  allocator.FreeUniquePtr(std::move(cuda_allocation));
+  // allocator.FreeUniquePtr(std::move(cuda_allocation));
 }

 } // namespace allocation
...
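Both concurrency tests build the same stack: one large chunk from a device or CPU allocator, a `BestFitAllocator` carving sub-allocations out of it, and a `LockedAllocator` serializing access. A compact sketch of that composition with the updated calls (constructor shapes are taken from the test above; treat it as illustrative):

```cpp
CUDAAllocator cuda_allocator(platform::CUDAPlace(0));
auto chunk =
    cuda_allocator.Allocate(256U * 1024 * 1024, cuda_allocator.kDefault);

// BestFitAllocator manages offsets inside the chunk; LockedAllocator adds a mutex.
LockedAllocator thread_safe_allocator(
    std::unique_ptr<Allocator>(new BestFitAllocator(chunk.get())));

auto sub = thread_safe_allocator.Allocate(1024, thread_safe_allocator.kDefault);
sub = nullptr;  // returns the sub-range to the best-fit free list
```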
@@ -35,7 +35,7 @@ inline std::unique_ptr<BufferedAllocator> GetBufferedAllocator(
 TEST(buffered_allocator, thread_safety) {
   std::unique_ptr<CPUAllocator> allocator(new CPUAllocator());
-  auto chunk = allocator->Allocate(1 << 20);
+  auto chunk = allocator->Allocate(1 << 20, allocator->kDefault);
   {
     auto buf_allocator = GetBufferedAllocator(chunk.get(), true);
     ASSERT_EQ(buf_allocator->IsAllocThreadSafe(), true);
@@ -45,8 +45,6 @@ TEST(buffered_allocator, thread_safety) {
     auto buf_allocator = GetBufferedAllocator(chunk.get(), false);
     ASSERT_EQ(buf_allocator->IsAllocThreadSafe(), false);
   }
-
-  allocator->FreeUniquePtr(std::move(chunk));
 }

 class StubAllocation : public Allocation {
@@ -54,27 +52,8 @@ class StubAllocation : public Allocation {
   using Allocation::Allocation;
 };

-class StubAllocator : public UnmanagedAllocator {
+class StubAllocator : public MannualFreeAllocator {
  public:
-  std::unique_ptr<Allocation> Allocate(size_t size,
-                                       Allocator::Attr attr) override {
-    ++construct_count_;
-    if (size == 0) {
-      return std::unique_ptr<Allocation>(
-          new StubAllocation(nullptr, 0, platform::CPUPlace()));
-    } else {
-      return std::unique_ptr<Allocation>(
-          new StubAllocation(new uint8_t[size], size, platform::CPUPlace()));
-    }
-  }
-
-  void FreeUniquePtr(std::unique_ptr<Allocation> allocation) {
-    StubAllocation *alloc = dynamic_cast<StubAllocation *>(allocation.get());
-    PADDLE_ENFORCE_NOT_NULL(alloc);
-    if (alloc->ptr()) delete[] static_cast<uint8_t *>(alloc->ptr());
-    ++destruct_count_;
-  }
-
   void ResetCounter() {
     construct_count_ = 0;
     destruct_count_ = 0;
@@ -84,6 +63,23 @@ class StubAllocator : public UnmanagedAllocator {
   size_t GetFreeCount() const { return destruct_count_; }

+ protected:
+  void Free(Allocation *allocation) override {
+    auto *alloc = dynamic_cast<StubAllocation *>(allocation);
+    PADDLE_ENFORCE_NOT_NULL(alloc);
+    if (alloc->ptr()) delete[] static_cast<uint8_t *>(alloc->ptr());
+    ++destruct_count_;
+    delete allocation;
+  }
+
+  Allocation *AllocateImpl(size_t size, Allocator::Attr attr) override {
+    ++construct_count_;
+    if (size == 0) {
+      return new StubAllocation(nullptr, 0, platform::CPUPlace());
+    } else {
+      return new StubAllocation(new uint8_t[size], size, platform::CPUPlace());
+    }
+  }
+
  private:
   size_t construct_count_ = 0;
   size_t destruct_count_ = 0;
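The rewritten stub implies the shape of the new `MannualFreeAllocator` base class: subclasses no longer override the public `Allocate`/`FreeUniquePtr` pair; they implement a raw `AllocateImpl`/`Free` pair and the base class is assumed to wire the returned handle's destruction to `Free`. A sketch of that assumed contract (not the actual Paddle header):

```cpp
// Assumed interface implied by the overrides above.
class MannualFreeAllocator : public Allocator {
 protected:
  // Assumed to be called by the base class when a returned allocation handle is dropped.
  virtual void Free(Allocation *allocation) = 0;

  // Assumed to be called by the base class from the public Allocate(size, attr).
  virtual Allocation *AllocateImpl(size_t size, Allocator::Attr attr) = 0;
};
```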
@@ -101,24 +97,24 @@ TEST(buffered_allocator, lazy_free) {
   {
     underlying_allocator->ResetCounter();
-    auto x = allocator->Allocate(1025);
+    auto x = allocator->Allocate(1025, allocator->kDefault);
     ASSERT_EQ(underlying_allocator->GetAllocCount(), kOne);
     ASSERT_EQ(underlying_allocator->GetFreeCount(), kZero);
-    allocator->FreeUniquePtr(std::move(x));
+    x = nullptr;
     ASSERT_EQ(underlying_allocator->GetFreeCount(), kZero);
   }

   {
     underlying_allocator->ResetCounter();
-    auto x = allocator->Allocate(900);
+    auto x = allocator->Allocate(900, allocator->kDefault);
     ASSERT_EQ(underlying_allocator->GetAllocCount(), kZero);
     ASSERT_EQ(underlying_allocator->GetFreeCount(), kZero);
-    auto y = allocator->Allocate(2048);
+    auto y = allocator->Allocate(2048, allocator->kDefault);
     ASSERT_EQ(underlying_allocator->GetAllocCount(), kOne);
     ASSERT_EQ(underlying_allocator->GetFreeCount(), kZero);
-    allocator->FreeUniquePtr(std::move(x));
+    x = nullptr;
     ASSERT_EQ(underlying_allocator->GetFreeCount(), kZero);
-    allocator->FreeUniquePtr(std::move(y));
+    y = nullptr;
     ASSERT_EQ(underlying_allocator->GetFreeCount(), kZero);
   }
@@ -132,13 +128,13 @@ TEST(buffered_allocator, lazy_free) {

 TEST(buffered_allocator, garbage_collection) {
   std::unique_ptr<CPUAllocator> cpu_allocator(new CPUAllocator());
-  auto chunk = cpu_allocator->Allocate(2048);
+  auto chunk = cpu_allocator->Allocate(2048, cpu_allocator->kDefault);
   auto allocator = GetBufferedAllocator(chunk.get(), false);
-  auto x1 = allocator->Allocate(1600);
-  auto x2 = allocator->Allocate(400);
-  allocator->FreeUniquePtr(std::move(x1));
-  allocator->FreeUniquePtr(std::move(x2));
-  auto x3 = allocator->Allocate(1600);
+  auto x1 = allocator->Allocate(1600, allocator->kDefault);
+  auto x2 = allocator->Allocate(400, allocator->kDefault);
+  x1 = nullptr;
+  x2 = nullptr;
+  auto x3 = allocator->Allocate(1600, allocator->kDefault);
   ASSERT_NE(x3, nullptr);
   ASSERT_NE(x3->ptr(), nullptr);
 }
...
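For orientation, the two tests above check caching behavior rather than the API change itself: `lazy_free` asserts that dropping a handle does not reach the underlying allocator (`GetFreeCount()` stays at zero), and `garbage_collection` asserts that a freed block can be handed out again. A compressed sketch of the reuse path, with sizes taken from the test:

```cpp
auto chunk = cpu_allocator->Allocate(2048, cpu_allocator->kDefault);
auto allocator = GetBufferedAllocator(chunk.get(), false);

auto x1 = allocator->Allocate(1600, allocator->kDefault);
x1 = nullptr;  // cached by the BufferedAllocator, not returned to the chunk yet

auto x3 = allocator->Allocate(1600, allocator->kDefault);  // expected to reuse the cached block
ASSERT_NE(x3->ptr(), nullptr);
```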
@@ -32,7 +32,7 @@ TEST(RetryAllocator, RetryAllocator) {
   CPUAllocator cpu_allocator;

   size_t size = (1 << 20);
-  auto cpu_allocation = cpu_allocator.Allocate(size);
+  auto cpu_allocation = cpu_allocator.Allocate(size, cpu_allocator.kDefault);
   std::unique_ptr<BestFitAllocator> best_fit_allocator(
       new BestFitAllocator(cpu_allocation.get()));
@@ -44,14 +44,14 @@ TEST(RetryAllocator, RetryAllocator) {
   size_t extra_time = 2;

   // Reserve to perform more tests in the future
-  std::vector<std::shared_ptr<ManagedAllocator>> allocators;
+  std::vector<std::shared_ptr<Allocator>> allocators;
   {
     std::unique_ptr<BestFitAllocator> best_fit_allocator(
         new BestFitAllocator(cpu_allocation.get()));
     std::unique_ptr<LockedAllocator> locked_allocator(
         new LockedAllocator(std::move(best_fit_allocator)));
-    allocators.push_back(
-        RetryAllocator::Create(std::move(locked_allocator),
-                               (thread_num - 1) * (sleep_time + extra_time)));
+    allocators.push_back(std::make_shared<RetryAllocator>(
+        std::move(locked_allocator),
+        (thread_num - 1) * (sleep_time + extra_time)));
   }
@@ -91,8 +91,6 @@ TEST(RetryAllocator, RetryAllocator) {
                                     [val](void *p) { return p == val; });
     ASSERT_TRUE(is_all_equal);
   }
-
-  cpu_allocator.FreeUniquePtr(std::move(cpu_allocation));
 }

 } // namespace allocation
...
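With `RetryAllocator::Create` gone, the test now builds the retrying chain directly with `std::make_shared`. A minimal sketch of that construction (the timeout expression is the one from the hunk; the constructor argument order is assumed to match it):

```cpp
std::unique_ptr<BestFitAllocator> best_fit(
    new BestFitAllocator(cpu_allocation.get()));
std::unique_ptr<LockedAllocator> locked(
    new LockedAllocator(std::move(best_fit)));

// RetryAllocator keeps retrying a failed allocation for the given time budget.
std::shared_ptr<Allocator> retrying = std::make_shared<RetryAllocator>(
    std::move(locked), (thread_num - 1) * (sleep_time + extra_time));
```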
@@ -110,7 +110,7 @@ class CudnnHolder {
   std::mutex& Mutex() { return mtx_; }

   cudnnHandle_t cudnn_handle_;
-  std::unique_ptr<memory::Allocation> workspace_;
+  memory::AllocationPtr workspace_;

   const cudaStream_t* stream_; // not owned;
   const CUDAPlace place_;
...