From 45e77154cf5a46f789bb89aeb30c6de769a18bfb Mon Sep 17 00:00:00 2001
From: QI JUN
Date: Tue, 9 Jan 2018 20:24:09 +0800
Subject: [PATCH] add general memory usage interface for both CPU/CUDA (#7352)

---
 paddle/memory/memory.cc      | 16 ++++++++++++++++
 paddle/memory/memory.h       |  7 +++++++
 paddle/memory/memory_test.cc |  6 ++++++
 3 files changed, 29 insertions(+)

diff --git a/paddle/memory/memory.cc b/paddle/memory/memory.cc
index c4bb6baee7..1a73a94567 100644
--- a/paddle/memory/memory.cc
+++ b/paddle/memory/memory.cc
@@ -114,5 +114,21 @@ void Free<platform::CUDAPlace>(platform::CUDAPlace place, void* p) {
 
 #endif
 
+size_t Usage::operator()(const platform::CPUPlace& cpu) const {
+  return Used(cpu);
+}
+
+size_t Usage::operator()(const platform::CUDAPlace& gpu) const {
+#ifdef PADDLE_WITH_CUDA
+  return Used(gpu);
+#else
+  PADDLE_THROW("'CUDAPlace' is not supported in CPU only device.");
+#endif
+}
+
+size_t memory_usage(const platform::Place& p) {
+  return boost::apply_visitor(Usage(), p);
+}
+
 }  // namespace memory
 }  // namespace paddle
diff --git a/paddle/memory/memory.h b/paddle/memory/memory.h
index 11bbb88187..7012b6d331 100644
--- a/paddle/memory/memory.h
+++ b/paddle/memory/memory.h
@@ -54,6 +54,13 @@ void Free(Place place, void* ptr);
 template <typename Place>
 size_t Used(Place place);
 
+struct Usage : public boost::static_visitor<size_t> {
+  size_t operator()(const platform::CPUPlace& cpu) const;
+  size_t operator()(const platform::CUDAPlace& gpu) const;
+};
+
+size_t memory_usage(const platform::Place& p);
+
 /**
  * \brief  Free memory block in one place.
  *
diff --git a/paddle/memory/memory_test.cc b/paddle/memory/memory_test.cc
index f476bf7126..b3f699f9b7 100644
--- a/paddle/memory/memory_test.cc
+++ b/paddle/memory/memory_test.cc
@@ -44,6 +44,9 @@ TEST(BuddyAllocator, CPUAllocation) {
 
   EXPECT_NE(p, nullptr);
 
+  paddle::platform::Place place = cpu;
+  EXPECT_EQ(paddle::memory::Used(cpu), paddle::memory::memory_usage(place));
+
   paddle::memory::Free(cpu, p);
 }
 
@@ -99,6 +102,9 @@ TEST(BuddyAllocator, GPUAllocation) {
 
   EXPECT_NE(p, nullptr);
 
+  paddle::platform::Place place = gpu;
+  EXPECT_EQ(paddle::memory::Used(gpu), paddle::memory::memory_usage(place));
+
   paddle::memory::Free(gpu, p);
 }
 
--
GitLab
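
The patch dispatches on the platform::Place variant through Boost's static-visitor mechanism (boost::static_visitor plus boost::apply_visitor). The sketch below illustrates only that dispatch pattern in isolation: the CPUPlace, CUDAPlace, Place, and Usage names and the byte counts here are illustrative stand-ins, not the Paddle types, and it assumes nothing beyond a Boost installation. The real visitor forwards to Used(place) as shown in the hunks above.

// Minimal sketch of visitor-based dispatch over a place variant.
// All types and values here are illustrative stand-ins, not Paddle's.
#include <cstddef>
#include <iostream>
#include <boost/variant.hpp>

struct CPUPlace {};
struct CUDAPlace {
  int device;
};

// A variant over the supported device types, analogous to platform::Place.
using Place = boost::variant<CPUPlace, CUDAPlace>;

// static_visitor<std::size_t> declares the visitor's result type; the
// operator() overload is chosen by the alternative the variant holds.
struct Usage : public boost::static_visitor<std::size_t> {
  std::size_t operator()(const CPUPlace&) const {
    return 1024;  // placeholder CPU figure; the real visitor calls Used(cpu)
  }
  std::size_t operator()(const CUDAPlace& gpu) const {
    return 4096 + gpu.device;  // placeholder GPU figure
  }
};

std::size_t memory_usage(const Place& p) {
  // apply_visitor performs the runtime dispatch to the matching overload.
  return boost::apply_visitor(Usage(), p);
}

int main() {
  Place cpu = CPUPlace{};
  Place gpu = CUDAPlace{1};
  std::cout << memory_usage(cpu) << " " << memory_usage(gpu) << "\n";
  return 0;
}

A side effect of this pattern is compile-time coverage: boost::apply_visitor requires the visitor to provide an operator() overload for every alternative in the variant, so adding a new place type to platform::Place without extending Usage fails to compile rather than silently falling through at runtime.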