diff --git a/paddle/memory/detail/system_allocator.cc b/paddle/memory/detail/system_allocator.cc
index f61e67a32906083881dd7f47433521876be9b355..a270bd59581520859d43cddd2fc0cfa72080f46d 100644
--- a/paddle/memory/detail/system_allocator.cc
+++ b/paddle/memory/detail/system_allocator.cc
@@ -27,7 +27,7 @@ limitations under the License. */
 // between host and device. Allocates too much would reduce the amount
 // of memory available to the system for paging. So, by default, we
 // should set false to use_pinned_memory.
-DEFINE_bool(use_pinned_memory, false, "If set, allocate cpu pinned memory.");
+DEFINE_bool(use_pinned_memory, true, "If set, allocate cpu pinned memory.");
 
 namespace paddle {
 namespace memory {
diff --git a/paddle/memory/memory.cc b/paddle/memory/memory.cc
index 5946c3ea4af5a60f2220b0c10e517e036a2c1050..684635405a5e46b7bc3de2494063c05d3b46b306 100644
--- a/paddle/memory/memory.cc
+++ b/paddle/memory/memory.cc
@@ -63,7 +63,7 @@ size_t Used(platform::CPUPlace place) {
 BuddyAllocator* GetGPUBuddyAllocator(int gpu_id) {
   using BuddyAllocVec = std::vector<BuddyAllocator*>;
   static std::unique_ptr<BuddyAllocVec, void (*)(BuddyAllocVec*)> as{
-      new std::vector<BuddyAllocator*>, [](BuddyAllocVec* p) {
+      new BuddyAllocVec, [](BuddyAllocVec* p) {
         std::for_each(p->begin(), p->end(),
                       [](BuddyAllocator* p) { delete p; });
       }};
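Note (not part of the patch): the memory.cc hunk uses a function-local static std::unique_ptr whose custom deleter walks the vector and deletes every BuddyAllocator at program exit. Below is a minimal, self-contained sketch of that idiom; the Allocator type and the GetAllocator/device_count names are stand-ins for illustration, not Paddle's actual API.

```cpp
// Sketch: a function-local static unique_ptr owns a vector of raw pointers,
// and its custom deleter frees each element (and the vector) when the
// program exits.
#include <algorithm>
#include <memory>
#include <vector>

struct Allocator {  // stand-in for paddle's BuddyAllocator
  explicit Allocator(int id) : id_(id) {}
  int id_;
};

Allocator* GetAllocator(int id, int device_count) {
  using AllocVec = std::vector<Allocator*>;
  // The deleter type is spelled out as a function pointer because a lambda's
  // type cannot be named; the capture-less lambda converts to it implicitly.
  static std::unique_ptr<AllocVec, void (*)(AllocVec*)> as{
      new AllocVec, [](AllocVec* p) {
        std::for_each(p->begin(), p->end(), [](Allocator* a) { delete a; });
        delete p;  // also free the vector itself
      }};
  if (as->empty()) {
    for (int i = 0; i < device_count; ++i) as->push_back(new Allocator(i));
  }
  return (*as)[id];
}

int main() { return GetAllocator(0, 1) == nullptr; }
```

The point of heap-allocating the vector and attaching a deleter, rather than using a plain static vector of raw pointers, is that the elements are actually deleted during static destruction instead of being leaked.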