diff --git a/paddle/memory/detail/gpu_allocator.h b/paddle/memory/detail/gpu_allocator.h
index 9452c41fb897554c731ffd00a4fb70b7d4abf0b6..682afdf7d3349aee107ed393ce2dbefebe8ff82f 100644
--- a/paddle/memory/detail/gpu_allocator.h
+++ b/paddle/memory/detail/gpu_allocator.h
@@ -16,8 +16,8 @@ limitations under the License. */
 
 #include <stddef.h>  // for size_t
 
-#include <thrust/system_error.h>
 #include <thrust/system/cuda/error.h>
+#include <thrust/system_error.h>
 
 namespace paddle {
 namespace memory {
@@ -36,14 +36,14 @@ inline void throw_on_error(cudaError_t e, const char* message) {
 // default, we should use GPUAllocator<staging=false>.
 template <bool staging>
 class GPUAllocator {
-public:
+ public:
   void* Alloc(size_t size);
   void Free(void* p, size_t size);
 };
 
 template <>
 class GPUAllocator<false> {
-public:
+ public:
   void* Alloc(size_t size) {
     void* p = 0;
     cudaError_t result = cudaMalloc(&p, size);
@@ -60,22 +60,22 @@ public:
     // that is returned if you ever call cudaFree after the
     // driver has already shutdown. This happens only if the
     // process is terminating, in which case we don't care if
-    // cudaFree succeeds.  
+    // cudaFree succeeds.
     auto err = cudaFree(p);
     if (err != cudaErrorCudartUnloading) {
-      throw_on_error(err, "cudaFree failed");  
+      throw_on_error(err, "cudaFree failed");
     }
   }
 };
 
 template <>
 class GPUAllocator<true> {
-public:
+ public:
   void* Alloc(size_t size) {
     void* p = 0;
     cudaError_t result = cudaMallocHost(&p, size);
     if (result == cudaSuccess) {
-      return p;  
+      return p;
     }
     // clear last error
     cudaGetLastError();
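
For context, a minimal sketch of how the two specializations touched above might be exercised. It assumes the header's namespace follows its path (`paddle::memory::detail`), that `GPUAllocator<true>` pairs its `cudaMallocHost` with a matching `Free`, and that `Alloc` returns `nullptr` on failure after clearing the sticky CUDA error (the failure return sits just past this hunk's context). The driver below is illustrative, not part of this patch:

```cpp
// Illustrative only -- exercises the allocator pattern shown in the diff.
// Assumed build: nvcc -I<repo-root> example.cu
#include <cstdio>

#include <cuda_runtime.h>

#include "paddle/memory/detail/gpu_allocator.h"

int main() {
  // staging=false -> cudaMalloc: plain device memory.
  paddle::memory::detail::GPUAllocator<false> device_alloc;
  // staging=true -> cudaMallocHost: pinned host memory used as a
  // staging area for host<->device transfers.
  paddle::memory::detail::GPUAllocator<true> staging_alloc;

  const size_t kSize = 1 << 20;  // 1 MiB
  void* d = device_alloc.Alloc(kSize);
  void* h = staging_alloc.Alloc(kSize);

  // On failure, Alloc clears the sticky CUDA error rather than
  // throwing (return value assumed to be nullptr in that case).
  if (d == nullptr || h == nullptr) {
    std::fprintf(stderr, "GPU allocation failed\n");
    return 1;
  }

  // Pinned staging memory is what makes this copy efficient.
  cudaMemcpy(d, h, kSize, cudaMemcpyHostToDevice);

  staging_alloc.Free(h, kSize);
  device_alloc.Free(d, kSize);
  return 0;
}
```

Note the `cudaErrorCudartUnloading` special case in `Free`: if the process is exiting and the CUDA driver has already shut down, `cudaFree` is expected to fail, so that one error code is deliberately swallowed instead of being routed through `throw_on_error`.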