diff --git a/paddle/fluid/memory/allocation/legacy_allocator.cc b/paddle/fluid/memory/allocation/legacy_allocator.cc index ef62f758e37f28ab826faac84fd1276b14de7980..6d93f93cd0d79acc3ff01c7001358023b6bb4b46 100644 --- a/paddle/fluid/memory/allocation/legacy_allocator.cc +++ b/paddle/fluid/memory/allocation/legacy_allocator.cc @@ -257,7 +257,7 @@ void *Alloc(const platform::CUDAPinnedPlace &place, void *ptr = buddy_allocator->Alloc(size); if (ptr == nullptr) { - LOG(WARNING) << "cudaMallocHost Cannot allocate " << size + LOG(WARNING) << "cudaHostAlloc Cannot allocate " << size << " bytes in CUDAPinnedPlace"; } if (FLAGS_init_allocated_mem) { diff --git a/paddle/fluid/memory/allocation/pinned_allocator.cc b/paddle/fluid/memory/allocation/pinned_allocator.cc index 6ac3aefdd18d6d9a21dc7ce66511013dfb78bc5b..de81d12cca6ca280289371abdec225c9e2b8f4d0 100644 --- a/paddle/fluid/memory/allocation/pinned_allocator.cc +++ b/paddle/fluid/memory/allocation/pinned_allocator.cc @@ -32,7 +32,7 @@ Allocation *CPUPinnedAllocator::AllocateImpl(size_t size, // "CPUPinnedAllocator should be used for Cross-Device Communication"); void *ptr; - PADDLE_ENFORCE(cudaMallocHost(&ptr, size)); + PADDLE_ENFORCE(cudaHostAlloc(&ptr, size, cudaHostAllocPortable)); return new CPUPinnedAllocation(ptr, size); } } // namespace allocation diff --git a/paddle/fluid/memory/allocation/pinned_allocator.h b/paddle/fluid/memory/allocation/pinned_allocator.h index 26d12dd91c7fda31802226a84d883b6a6e9abbe4..42d0938f2afbb1efca8bfdd7035bc0eada30f06b 100644 --- a/paddle/fluid/memory/allocation/pinned_allocator.h +++ b/paddle/fluid/memory/allocation/pinned_allocator.h @@ -19,7 +19,7 @@ namespace paddle { namespace memory { namespace allocation { -// Allocator uses `cudaMallocHost` +// Allocator uses `cudaHostAlloc` class CPUPinnedAllocation : public Allocation { public: CPUPinnedAllocation(void *ptr, size_t size) diff --git a/paddle/fluid/memory/detail/system_allocator.cc 
b/paddle/fluid/memory/detail/system_allocator.cc index 3e8fb83e9d5ba2078bcf37e4a4af74708df9c11c..197d1c2f21fd818879aafe17599bc87d33caa198 100644 --- a/paddle/fluid/memory/detail/system_allocator.cc +++ b/paddle/fluid/memory/detail/system_allocator.cc @@ -173,14 +173,14 @@ void* CUDAPinnedAllocator::Alloc(size_t* index, size_t size) { void* p; // PINNED memory is visible to all CUDA contexts. - cudaError_t result = cudaMallocHost(&p, size); + cudaError_t result = cudaHostAlloc(&p, size, cudaHostAllocPortable); if (result == cudaSuccess) { *index = 1; // PINNED memory cuda_pinnd_alloc_size_ += size; return p; } else { - LOG(WARNING) << "cudaMallocHost failed."; + LOG(WARNING) << "cudaHostAlloc failed."; return nullptr; } diff --git a/paddle/fluid/operators/reader/buffered_reader.cc b/paddle/fluid/operators/reader/buffered_reader.cc index 971db8b37d8409185a4e81d1e77b0fc53534e9f5..defc29b91f81cb851fec24c5cd9d62dc72c54147 100644 --- a/paddle/fluid/operators/reader/buffered_reader.cc +++ b/paddle/fluid/operators/reader/buffered_reader.cc @@ -29,6 +29,7 @@ BufferedReader::~BufferedReader() { if (platform::is_gpu_place(place_)) { platform::SetDeviceId(boost::get<platform::CUDAPlace>(place_).device); PADDLE_ENFORCE(cudaStreamDestroy(stream)); + for (auto &event : events) PADDLE_ENFORCE(cudaEventDestroy(event)); } #endif } @@ -43,7 +44,14 @@ BufferedReader::BufferedReader( #ifdef PADDLE_WITH_CUDA if (platform::is_gpu_place(place_)) { platform::SetDeviceId(boost::get<platform::CUDAPlace>(place_).device); - PADDLE_ENFORCE(cudaStreamCreate(&stream)); + compute_stream = + ((platform::CUDADeviceContext *)(platform::DeviceContextPool::Instance() .Get(place_))) + ->stream(); + events.resize(buffer_size); + for (auto &event : events) + PADDLE_ENFORCE(cudaEventCreateWithFlags(&event, cudaEventDisableTiming)); + PADDLE_ENFORCE(cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking)); } #endif cpu_buffer_.resize(buffer_size); @@ -59,6 +67,12 @@ void BufferedReader::ReadTillBufferFullAsync() { } void
BufferedReader::ReadAsync(size_t i) { +#ifdef PADDLE_WITH_CUDA + if (platform::is_gpu_place(place_)) { + platform::SetDeviceId(boost::get<platform::CUDAPlace>(place_).device); + PADDLE_ENFORCE(cudaEventRecord(events[i], compute_stream)); + } +#endif position_.emplace(thread_pool_.enqueue([this, i]() -> size_t { TensorVec &cpu = cpu_buffer_[i]; reader_->ReadNext(&cpu); @@ -71,6 +85,8 @@ void BufferedReader::ReadAsync(size_t i) { // NOTE(liangdun): using async copy instead of TensorCopySync // TensorCopySync would block other stream if (platform::is_gpu_place(place_)) { + platform::SetDeviceId(boost::get<platform::CUDAPlace>(place_).device); + PADDLE_ENFORCE(cudaStreamWaitEvent(stream, events[i], 0)); TensorVec &gpu = gpu_buffer_[i]; gpu.resize(cpu.size()); for (size_t i = 0; i < cpu.size(); ++i) { diff --git a/paddle/fluid/operators/reader/buffered_reader.h b/paddle/fluid/operators/reader/buffered_reader.h index e55572177ccd7cd18695bdecc4c65a25ffd6b5d4..87680da01a1f51cfdfe4d100508440eda9d1877f 100644 --- a/paddle/fluid/operators/reader/buffered_reader.h +++ b/paddle/fluid/operators/reader/buffered_reader.h @@ -64,6 +64,8 @@ class BufferedReader : public framework::DecoratedReader { size_t prev_pos_{-1UL}; #ifdef PADDLE_WITH_CUDA cudaStream_t stream; + cudaStream_t compute_stream; + std::vector<cudaEvent_t> events; #endif };