diff --git a/paddle/fluid/memory/allocation/allocator_facade.cc b/paddle/fluid/memory/allocation/allocator_facade.cc
index 44cd915b168153343d9d1e3a7896cf6d2309b978..3e9c2ba269fe2f0d3bf8f87da308883687f9b427 100644
--- a/paddle/fluid/memory/allocation/allocator_facade.cc
+++ b/paddle/fluid/memory/allocation/allocator_facade.cc
@@ -311,6 +311,10 @@ class AllocatorFacadePrivate {
 
   void RecordStream(std::shared_ptr<Allocation> allocation,
                     const gpuStream_t& stream) {
+    if (allocation->size() == 0) {
+      return;
+    }
+
     StreamSafeCUDAAllocation* stream_safe_cuda_allocation =
         dynamic_cast<StreamSafeCUDAAllocation*>(allocation.get());
     PADDLE_ENFORCE_NOT_NULL(stream_safe_cuda_allocation,
diff --git a/paddle/fluid/memory/stream_safe_cuda_alloc_test.cu b/paddle/fluid/memory/stream_safe_cuda_alloc_test.cu
index 286dcdba8f22fecc55ce487857fc1b434b87b546..52c3825053ca226de6a6058197dffaa9c23944f8 100644
--- a/paddle/fluid/memory/stream_safe_cuda_alloc_test.cu
+++ b/paddle/fluid/memory/stream_safe_cuda_alloc_test.cu
@@ -243,6 +243,27 @@ TEST(StreamSafeCUDAAllocInterfaceTest, GetAllocatorInterfaceTest) {
   CheckMemLeak(place);
 }
 
+TEST(StreamSafeCUDAAllocInterfaceTest, ZeroSizeRecordStreamTest) {
+  platform::CUDAPlace place = platform::CUDAPlace();
+  std::shared_ptr<Allocation> zero_size_allocation = AllocShared(place, 0);
+  EXPECT_EQ(zero_size_allocation->ptr(), nullptr);
+
+  gpuStream_t stream;
+#ifdef PADDLE_WITH_CUDA
+  PADDLE_ENFORCE_GPU_SUCCESS(cudaStreamCreate(&stream));
+#else
+  PADDLE_ENFORCE_GPU_SUCCESS(hipStreamCreate(&stream));
+#endif
+
+  EXPECT_NO_THROW(RecordStream(zero_size_allocation, stream));
+
+#ifdef PADDLE_WITH_CUDA
+  PADDLE_ENFORCE_GPU_SUCCESS(cudaStreamDestroy(stream));
+#else
+  PADDLE_ENFORCE_GPU_SUCCESS(hipStreamDestroy(stream));
+#endif
+}
+
 TEST(StreamSafeCUDAAllocInterfaceTest, GetStreamInterfaceTest) {
   platform::CUDAPlace place = platform::CUDAPlace();
   size_t alloc_size = 256;