Commit 766c7405 authored by chengduoZH

follow comments

Parent 2514d70e
......@@ -15,6 +15,6 @@ cc_library(paddle_memory
cc_test(memory_test SRCS memory_test.cc DEPS place paddle_memory)
if (WITH_GPU)
nv_test(pinned_memory_test SRCS pinned_memory_test.cu DEPS place paddle_memory)
endif()
# if (WITH_GPU)
# nv_test(pinned_memory_test SRCS pinned_memory_test.cu DEPS place paddle_memory)
# endif()
......@@ -21,8 +21,9 @@ namespace memory {
namespace detail {
/**
* \brief SystemAllocator is the parent class of CPUAllocator and GPUAllocator.
* A BuddyAllocator object uses a SystemAllocator* pointing to the
* \brief SystemAllocator is the parent class of CPUAllocator,
* CUDAPinnedAllocator and GPUAllocator. A BuddyAllocator
* object uses a SystemAllocator* pointing to the
* underlying system allocator.
*/
class SystemAllocator {
......@@ -43,6 +44,8 @@ class CPUAllocator : public SystemAllocator {
#ifdef PADDLE_WITH_CUDA
class GPUAllocator : public SystemAllocator {
public:
explicit GPUAllocator(int gpu_id) : gpu_id_(gpu_id) {}
virtual void* Alloc(size_t& index, size_t size);
virtual void Free(void* p, size_t size, size_t index);
virtual bool UseGpu() const;
......@@ -50,6 +53,7 @@ class GPUAllocator : public SystemAllocator {
private:
size_t gpu_alloc_size_ = 0;
size_t fallback_alloc_size_ = 0;
int gpu_id_;
};
class CUDAPinnedAllocator : public SystemAllocator {
......
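For reference, a pinned-memory system allocator of this kind is typically built on cudaHostAlloc/cudaFreeHost. The sketch below is illustrative only and is not the exact implementation added in this commit; the class name, the index convention, and the bookkeeping member are assumptions made for this sketch.

// Minimal sketch (assumed, not the commit's actual code): a page-locked
// host-memory allocator in the style of the SystemAllocator interface.
#include <cuda_runtime.h>
#include <cstddef>

class CUDAPinnedAllocatorSketch {
 public:
  // Returns page-locked host memory; sets index to 1 to mark it as pinned
  // (the value 1 is a convention assumed for this sketch).
  void* Alloc(size_t& index, size_t size) {
    if (size == 0) return nullptr;
    void* p = nullptr;
    // cudaHostAlloc returns page-locked memory the GPU can DMA into directly.
    if (cudaHostAlloc(&p, size, cudaHostAllocPortable) == cudaSuccess) {
      index = 1;
      pinned_alloc_size_ += size;  // hypothetical bookkeeping member
      return p;
    }
    return nullptr;
  }

  void Free(void* p, size_t size, size_t index) {
    if (p == nullptr || index != 1) return;
    pinned_alloc_size_ -= size;
    cudaFreeHost(p);
  }

  // Pinned memory lives on the host, so no GPU device is used for storage.
  bool UseGpu() const { return false; }

 private:
  size_t pinned_alloc_size_ = 0;
};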
......@@ -24,6 +24,8 @@ limitations under the License. */
#include <gtest/gtest.h>
#include <unordered_map>
// This unit test is an example comparing the performance of using pinned
// memory versus pageable (unpinned) memory. In general, pinned memory is faster.
template <typename T>
__global__ void Kernel(T* output, int dim) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
......@@ -33,7 +35,7 @@ __global__ void Kernel(T* output, int dim) {
}
template <typename Place>
void test_pinned_memory() {
float test_pinned_memory() {
Place cpu_place;
paddle::platform::CUDAPlace cuda_place;
......@@ -133,12 +135,14 @@ void test_pinned_memory() {
paddle::memory::Free(cpu_place, output_pinned_mem[j]);
paddle::memory::Free(cuda_place, gpu_mem[j]);
}
return elapsedTime / 30;
}
TEST(CPUANDCUDAPinned, CPUAllocator) {
test_pinned_memory<paddle::platform::CPUPlace>();
TEST(CPUANDCUDAPinned, CPUAllocatorAndCUDAPinnedAllocator) {
  // Generally speaking, operations on pinned memory are faster than those on
  // unpinned (pageable) memory, but if this unit test fails frequently, please
  // disable it for the time being.
float time1 = test_pinned_memory<paddle::platform::CPUPlace>();
float time2 = test_pinned_memory<paddle::platform::CUDAPinnedPlace>();
  EXPECT_GT(time1, time2);
}
TEST(CPUANDCUDAPinned, CUDAPinnedAllocator) {
test_pinned_memory<paddle::platform::CUDAPinnedPlace>();
}
\ No newline at end of file
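To illustrate what the test above measures, here is a self-contained sketch (independent of Paddle; the buffer size and iteration count are arbitrary choices) that times host-to-device copies from a pageable buffer and from a pinned buffer allocated with cudaHostAlloc. Pinned memory is usually faster because the driver can DMA from it directly instead of staging the data through an internal pinned buffer.

// Standalone sketch: compare H2D copy time from pageable vs pinned host memory.
#include <cuda_runtime.h>
#include <cstdio>
#include <cstdlib>

static float TimeCopies(void* host_buf, void* dev_buf, size_t bytes, int iters) {
  cudaEvent_t start, stop;
  cudaEventCreate(&start);
  cudaEventCreate(&stop);
  cudaEventRecord(start);
  for (int i = 0; i < iters; ++i) {
    cudaMemcpy(dev_buf, host_buf, bytes, cudaMemcpyHostToDevice);
  }
  cudaEventRecord(stop);
  cudaEventSynchronize(stop);
  float ms = 0.0f;
  cudaEventElapsedTime(&ms, start, stop);
  cudaEventDestroy(start);
  cudaEventDestroy(stop);
  return ms / iters;  // average milliseconds per copy
}

int main() {
  const size_t bytes = 64 << 20;  // 64 MB, arbitrary
  const int iters = 30;

  void* dev_buf = nullptr;
  cudaMalloc(&dev_buf, bytes);

  void* pageable = malloc(bytes);  // ordinary pageable host memory
  void* pinned = nullptr;
  cudaHostAlloc(&pinned, bytes, cudaHostAllocDefault);  // page-locked memory

  float t_pageable = TimeCopies(pageable, dev_buf, bytes, iters);
  float t_pinned = TimeCopies(pinned, dev_buf, bytes, iters);
  printf("pageable: %.3f ms/copy, pinned: %.3f ms/copy\n", t_pageable, t_pinned);

  free(pageable);
  cudaFreeHost(pinned);
  cudaFree(dev_buf);
  return 0;
}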