From 201e7157873f9353164f078a6d33fe878514c426 Mon Sep 17 00:00:00 2001 From: liaogang Date: Fri, 28 Jul 2017 11:35:16 +0800 Subject: [PATCH] ENH: Add comments for memory and memcpy --- paddle/memory/memcpy.h | 26 ++++++++++++++++++++++++++ paddle/memory/memory.h | 40 +++++++++++++++++++++++++++++++++++++--- 2 files changed, 63 insertions(+), 3 deletions(-) diff --git a/paddle/memory/memcpy.h b/paddle/memory/memcpy.h index 99b1c2e1c3e..2b9c0eada6e 100644 --- a/paddle/memory/memcpy.h +++ b/paddle/memory/memcpy.h @@ -20,13 +20,39 @@ limitations under the License. */ namespace paddle { namespace memory { +/** + * \brief Copy memory from one place to another. + * + * \param[in] DstPlace Destination allocation place (CPU). + * \param[in] dst Destination memory address. + * \param[in] SrcPlace Source allocation place (CPU). + * \param[in] src Source memory address. + * \param[in] num Memory size in bytes to copy. + * + */ template <typename DstPlace, typename SrcPlace> void Copy(DstPlace, void* dst, SrcPlace, const void* src, size_t num); #ifndef PADDLE_ONLY_CPU + +/** + * \brief Copy memory from one place to another. + * + * \param[in] DstPlace Destination allocation place (CPU or GPU). + * \param[in] dst Destination memory address. + * \param[in] SrcPlace Source allocation place (CPU or GPU). + * \param[in] src Source memory address. + * \param[in] num Memory size in bytes to copy. + * \param[in] stream CUDA stream. + * + * \note For a GPU memory copy, a CUDA stream needs to be specified + * so that the copy can be performed asynchronously. + * + */ template <typename DstPlace, typename SrcPlace> void Copy(DstPlace, void* dst, SrcPlace, const void* src, size_t num, cudaStream_t stream); + #endif // PADDLE_ONLY_CPU } // namespace memory diff --git a/paddle/memory/memory.h b/paddle/memory/memory.h index fd4d5e7082c..44f567caf9c 100644 --- a/paddle/memory/memory.h +++ b/paddle/memory/memory.h @@ -20,15 +20,49 @@ limitations under the License. */ namespace paddle { namespace memory { +/** + * \brief Allocate memory block in one place.
+ * + * \param[in] place Allocation place (CPU or GPU). + * \param[in] size Allocation size. + * + * \return Allocated memory block address. + * + * \note If the returned address is nullptr, memory allocation failed + * because of insufficient memory in the current system. Whenever Alloc + * is invoked, the caller must check whether the returned memory + * address is valid. + */ template <typename Place> -void* Alloc(Place, size_t); +void* Alloc(Place place, size_t size); +/** + * \brief Free memory block in one place. + * + * \param[in] place Allocation place (CPU or GPU). + * \param[in] ptr Memory block address to free. + * + */ template <typename Place> -void Free(Place, void*); +void Free(Place place, void* ptr); +/** + * \brief Total size of used memory in one place. + * + * \param[in] place Allocation place (CPU or GPU). + * + * \return Total size of used memory in bytes. + * + */ template <typename Place> -size_t Used(Place); +size_t Used(Place place); +/** + * \brief A custom deleter that frees a memory block allocated in one place. + * + * \note In some cases, a custom deleter is used to + * deallocate the memory automatically for + * std::unique_ptr in tensor.h. + * + */ template <typename T, typename Place> class PODDeleter { static_assert(std::is_pod<T>::value, "T must be POD"); -- GitLab