Commit ab5442d9 authored by liuwei1031

Merge branch 'develop' of https://github.com/liuwei1031/Paddle into develop

@@ -117,6 +117,11 @@ void* GPUAllocator::Alloc(size_t* index, size_t size) {
  if (result == cudaSuccess) {
    *index = 0;
    gpu_alloc_size_ += size;
    if (gpu_alloc_size_ > s_memoryMap[gpu_id_]) {
      s_memoryMap[gpu_id_] = gpu_alloc_size_;
      VLOG(3) << "device: " << gpu_id_
              << " maximum memory size : " << (gpu_alloc_size_ >> 20) << " MiB";
    }
    return p;
  } else {
    LOG(WARNING)
......
@@ -15,6 +15,7 @@ limitations under the License. */
#pragma once
#include <stddef.h> // for size_t
#include <unordered_map>
namespace paddle {
namespace memory {
@@ -44,6 +45,8 @@ class CPUAllocator : public SystemAllocator {
#ifdef PADDLE_WITH_CUDA
class GPUAllocator : public SystemAllocator {
 public:
  std::unordered_map<int, uint64_t> s_memoryMap;
  explicit GPUAllocator(int gpu_id) : gpu_id_(gpu_id) {}
  virtual void* Alloc(size_t* index, size_t size);
......
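
Taken together, the two hunks add per-device peak-memory bookkeeping to GPUAllocator: every successful allocation bumps gpu_alloc_size_, and whenever it exceeds the value recorded for gpu_id_ in s_memoryMap, the new high-water mark is stored and reported via VLOG(3). The standalone sketch below shows the same bookkeeping pattern in isolation; PeakTracker, RecordAlloc, RecordFree, and the std::cout logging are illustrative stand-ins for this page, not names from the Paddle code.

#include <cstddef>
#include <cstdint>
#include <iostream>
#include <unordered_map>

// Illustrative stand-in for the allocator's bookkeeping: keep a running
// allocation total for one device and remember the largest value ever seen.
class PeakTracker {
 public:
  explicit PeakTracker(int gpu_id) : gpu_id_(gpu_id) {}

  // Call after a successful device allocation of `size` bytes.
  void RecordAlloc(size_t size) {
    alloc_size_ += size;
    uint64_t& peak = peak_by_device_[gpu_id_];  // defaults to 0 for a new device
    if (alloc_size_ > peak) {
      peak = alloc_size_;  // new high-water mark for this device
      std::cout << "device: " << gpu_id_
                << " maximum memory size : " << (alloc_size_ >> 20) << " MiB\n";
    }
  }

  // Call after freeing `size` bytes; the recorded peak is never lowered.
  void RecordFree(size_t size) { alloc_size_ -= size; }

 private:
  int gpu_id_;
  uint64_t alloc_size_ = 0;
  std::unordered_map<int, uint64_t> peak_by_device_;
};

int main() {
  PeakTracker tracker(/*gpu_id=*/0);
  tracker.RecordAlloc(256 << 20);  // total 256 MiB, new peak logged
  tracker.RecordFree(128 << 20);   // total drops to 128 MiB
  tracker.RecordAlloc(64 << 20);   // total 192 MiB, below peak, no log
  tracker.RecordAlloc(256 << 20);  // total 448 MiB, new peak logged
  return 0;
}

Because the peak is only ever raised in RecordAlloc, frees lower the running total but never the recorded maximum, which matches how the diff updates s_memoryMap only on the allocation path.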