Commit 871a3f6e, authored by Luo Tao

remove unused PADDLE_ONLY_CPU comment

Parent 8e2cc754
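For context, every change in this diff follows the same pattern: a GPU-only block guarded by conditional compilation carries a stale PADDLE_ONLY_CPU comment on its closing #endif, and this commit removes that trailing comment without touching the guarded code. A minimal sketch of the pattern, assuming a typical #ifndef guard (the opening directive and the declaration below are illustrative placeholders, not taken from the hunks):

// Before this commit: the closing #endif repeats the macro name in a comment.
#ifndef PADDLE_ONLY_CPU   // assumed guard; the opening directive is not shown in the hunks
void GpuOnlyHelper();     // hypothetical GPU-only declaration, for illustration only
#endif /* PADDLE_ONLY_CPU */

// After this commit: the trailing comment is dropped, leaving a bare #endif.
#ifndef PADDLE_ONLY_CPU
void GpuOnlyHelper();
#endif

The guarded code itself is unchanged in every hunk; only the comment after #endif is deleted, which the commit message describes as unused.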
@@ -162,4 +162,4 @@ int main(int argc, char** argv) {
   return RUN_ALL_TESTS();
 }
-#endif /* PADDLE_ONLY_CPU */
+#endif
@@ -182,7 +182,7 @@ BuddyAllocator::PoolSet::iterator BuddyAllocator::RefillPool() {
       max_chunk_size_ = platform::GpuMaxChunkSize();
     }
   }
-#endif // PADDLE_ONLY_CPU
+#endif
   // Allocate a new maximum sized block
   size_t index = 0;
...
@@ -134,7 +134,7 @@ void GPUAllocator::Free(void* p, size_t size, size_t index) {
 bool GPUAllocator::UseGpu() const { return true; }
-#endif // PADDLE_ONLY_CPU
+#endif
 } // namespace detail
 } // namespace memory
...
@@ -51,7 +51,7 @@ class GPUAllocator : public SystemAllocator {
   size_t gpu_alloc_size_ = 0;
   size_t fallback_alloc_size_ = 0;
 };
-#endif // PADDLE_ONLY_CPU
+#endif
 } // namespace detail
 } // namespace memory
...
@@ -62,4 +62,4 @@ TEST(GPUAllocator, Alloc) {
   TestAllocator(a, 2048);
   TestAllocator(a, 0);
 }
-#endif // PADDLE_ONLY_CPU
+#endif
@@ -89,7 +89,7 @@ void Copy<platform::GPUPlace, platform::GPUPlace>(platform::GPUPlace dst_place,
   platform::GpuMemcpySync(dst, src, num, cudaMemcpyDeviceToDevice);
 }
-#endif // PADDLE_ONLY_CPU
+#endif
 } // namespace memory
 } // namespace paddle
@@ -53,7 +53,7 @@ template <typename DstPlace, typename SrcPlace>
 void Copy(DstPlace, void* dst, SrcPlace, const void* src, size_t num,
           cudaStream_t stream);
-#endif // PADDLE_ONLY_CPU
+#endif
 } // namespace memory
 } // namespace paddle
@@ -111,7 +111,7 @@ size_t Used<platform::GPUPlace>(platform::GPUPlace place) {
   return GetGPUBuddyAllocator(place.device)->Used();
 }
-#endif // PADDLE_ONLY_CPU
+#endif
 } // namespace memory
 } // namespace paddle
@@ -135,4 +135,4 @@ TEST(BuddyAllocator, GPUMultAlloc) {
   }
 }
-#endif // PADDLE_ONLY_CPU
+#endif
@@ -136,7 +136,7 @@ cudnnHandle_t CUDADeviceContext::cudnn_handle() const { return cudnn_handle_; }
 cudaStream_t CUDADeviceContext::stream() const { return stream_; }
-#endif // PADDLE_ONLY_CPU
+#endif
 } // namespace platform
 } // namespace paddle
@@ -41,7 +41,7 @@ limitations under the License. */
 #include <thrust/system/cuda/error.h>
 #include <thrust/system_error.h>
-#endif // PADDLE_ONLY_CPU
+#endif
 namespace paddle {
 namespace platform {
...
@@ -63,4 +63,4 @@ void GpuMemcpyPeer(void *dst, int dst_device, const void *src, int src_device,
 } // namespace platform
 } // namespace paddle
-#endif // PADDLE_ONLY_CPU
+#endif