/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/memory/detail/system_allocator.h"
#include "paddle/fluid/platform/assert.h"
#include "paddle/fluid/platform/cpu_info.h"
#include "paddle/fluid/platform/enforce.h"
#include "paddle/fluid/platform/gpu_info.h"

#include <stdlib.h>    // for malloc and free
#include <sys/mman.h>  // for mlock and munlock
#include <algorithm>   // for std::max

#include "gflags/gflags.h"

// If use_pinned_memory is true, CPUAllocator calls mlock, which
// returns pinned and locked memory as staging areas for data exchange
// between host and device.  Allocating too much pinned memory would
// reduce the amount of memory available to the system for paging, so
// by default use_pinned_memory should be set to false.
DEFINE_bool(use_pinned_memory, true, "If set, allocate cpu pinned memory.");
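// FLAGS_fraction_of_gpu_memory_to_use is defined elsewhere (DECLARE_double
// only declares it for this file); here it is read solely to compose the
// out-of-memory warning in GPUAllocator::Alloc below.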
DECLARE_double(fraction_of_gpu_memory_to_use);
namespace paddle {
namespace memory {
namespace detail {

void* CPUAllocator::Alloc(size_t& index, size_t size) {
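  // "index" is an out-parameter recording how the block was obtained, so
  // that Free() knows how to release it: 0 = plain memory, 1 = memory that
  // was additionally mlock-ed (pinned).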
  // According to http://www.cplusplus.com/reference/cstdlib/malloc/,
  // when size is zero, malloc may return a non-null pointer that must not
  // be dereferenced -- so we return nullptr explicitly.
  if (size <= 0) return nullptr;

  index = 0;  // plain, unlocked memory

  void* p;

#ifdef PADDLE_WITH_MKLDNN
  // Memory alignment for MKL-DNN; refer to
  // https://github.com/01org/mkl-dnn/blob/master/include/mkldnn.hpp
  PADDLE_ENFORCE_EQ(posix_memalign(&p, 4096ul, size), 0);
#else
  PADDLE_ENFORCE_EQ(posix_memalign(&p, 32ul, size), 0);
#endif
  PADDLE_ENFORCE(p, "Failed to allocate CPU memory: size = %d.", size);

  if (p != nullptr) {
    if (FLAGS_use_pinned_memory) {
      index = 1;
      mlock(p, size);  // lock memory
    }
  }

  return p;
}

void CPUAllocator::Free(void* p, size_t size, size_t index) {
  if (p != nullptr && index == 1) {
    munlock(p, size);
  }
  free(p);
}

bool CPUAllocator::UseGpu() const { return false; }

#ifdef PADDLE_WITH_CUDA

void* GPUAllocator::Alloc(size_t& index, size_t size) {
  // CUDA documentation doesn't explain if cudaMalloc returns nullptr
  // if size is 0.  We just make sure it does.
  if (size <= 0) return nullptr;
  void* p;
  cudaError_t result = cudaMalloc(&p, size);
  if (result == cudaSuccess) {
    index = 0;
    gpu_alloc_size_ += size;
    return p;
  } else {
    LOG(WARNING)
        << "Cannot malloc " << size / 1024.0 / 1024.0
        << " MB GPU memory. Please shrink FLAGS_fraction_of_gpu_memory_to_use "
           "environment variable to a lower value. Current value is "
        << FLAGS_fraction_of_gpu_memory_to_use;
    return nullptr;
  }
}

void GPUAllocator::Free(void* p, size_t size, size_t index) {
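  // index == 0 marks device memory obtained from cudaMalloc; index == 1 marks
  // pinned host memory (tracked by fallback_alloc_size_), which must be
  // released with cudaFreeHost instead of cudaFree.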
  cudaError_t err;

  if (index == 0) {
    PADDLE_ASSERT(gpu_alloc_size_ >= size);
    gpu_alloc_size_ -= size;
    err = cudaFree(p);
  } else {
    PADDLE_ASSERT(fallback_alloc_size_ >= size);
    fallback_alloc_size_ -= size;
    err = cudaFreeHost(p);
  }

  // Purposefully allow cudaErrorCudartUnloading, because
  // that is returned if you ever call cudaFree after the
  // driver has already shut down. This happens only if the
  // process is terminating, in which case we don't care if
  // cudaFree succeeds.
  if (err != cudaErrorCudartUnloading) {
    PADDLE_ENFORCE(err, "cudaFree{Host} failed in GPUAllocator::Free.");
  }
}

bool GPUAllocator::UseGpu() const { return true; }

// PINNED memory allows direct DMA transfers by the GPU to and from system
// memory. It’s locked to a physical address.
void* CUDAPinnedAllocator::Alloc(size_t& index, size_t size) {
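  // On success, index is set to 1 to mark pinned host memory; Free() asserts
  // index == 1 before calling cudaFreeHost.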
  if (size <= 0) return nullptr;

  // NOTE: here, we use CUDAPinnedMaxAllocSize as the maximum memory size
  // of host pinned allocation. Allocating too much would reduce
  // the amount of memory available to the underlying system for paging.
  size_t usable =
      paddle::platform::CUDAPinnedMaxAllocSize() - cuda_pinnd_alloc_size_;

  if (size > usable) return nullptr;

  void* p;
  // PINNED memory is visible to all CUDA contexts.
  cudaError_t result = cudaMallocHost(&p, size);

  if (result == cudaSuccess) {
    index = 1;  // PINNED memory
    cuda_pinnd_alloc_size_ += size;
    return p;
  }

  return nullptr;
}

void CUDAPinnedAllocator::Free(void* p, size_t size, size_t index) {
  cudaError_t err;
  PADDLE_ASSERT(index == 1);

  PADDLE_ASSERT(cuda_pinnd_alloc_size_ >= size);
  cuda_pinnd_alloc_size_ -= size;
  err = cudaFreeHost(p);

  // Purposefully allow cudaErrorCudartUnloading, because
  // that is returned if you ever call cudaFreeHost after the
  // driver has already shut down. This happens only if the
  // process is terminating, in which case we don't care if
  // cudaFreeHost succeeds.
  if (err != cudaErrorCudartUnloading) {
    PADDLE_ENFORCE(err, "cudaFreeHost failed in CUDAPinnedAllocator::Free.");
  }
}

bool CUDAPinnedAllocator::UseGpu() const { return false; }

#endif

}  // namespace detail
}  // namespace memory
}  // namespace paddle