/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#define GLOG_NO_ABBREVIATED_SEVERITIES

#include "paddle/fluid/memory/detail/system_allocator.h"

#ifdef _WIN32
#include <malloc.h>
#include <windows.h>  // VirtualLock/VirtualUnlock
#else
#include <sys/mman.h>  // for mlock and munlock
#endif
#include <stdlib.h>  // for malloc and free

#include <algorithm>  // for std::max
#include <string>
#include <utility>

#include "gflags/gflags.h"
#include "paddle/fluid/memory/allocation/allocator.h"
#include "paddle/fluid/platform/cpu_info.h"
#include "paddle/fluid/platform/enforce.h"
#include "paddle/fluid/platform/gpu_info.h"
#ifdef PADDLE_WITH_CUDA
#include "paddle/fluid/platform/cuda_device_guard.h"
#endif

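// These gflags are defined elsewhere in Paddle; the DECLARE_* macros only
// make them visible to this translation unit.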
DECLARE_bool(use_pinned_memory);
DECLARE_double(fraction_of_gpu_memory_to_use);
DECLARE_uint64(initial_gpu_memory_in_mb);
DECLARE_uint64(reallocate_gpu_memory_in_mb);

namespace paddle {
namespace memory {
namespace detail {

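// Returns a block of at least `size` bytes whose address is aligned to 32
// bytes (4096 bytes when MKL-DNN is enabled); allocation failures abort via
// PADDLE_ENFORCE.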
void* AlignedMalloc(size_t size) {
  void* p = nullptr;
  size_t alignment = 32ul;
#ifdef PADDLE_WITH_MKLDNN
  // refer to https://github.com/01org/mkl-dnn/blob/master/include/mkldnn.hpp
  // memory alignment
  alignment = 4096ul;
#endif
#ifdef _WIN32
  p = _aligned_malloc(size, alignment);
#else
  int error = posix_memalign(&p, alignment, size);
  PADDLE_ENFORCE_EQ(
      error, 0,
      platform::errors::ResourceExhausted(
          "Failed to allocate %ld bytes of memory, error code is %d.", size,
          error));
#endif
  PADDLE_ENFORCE_NOT_NULL(
      p, platform::errors::ResourceExhausted(
             "Failed to allocate %ld bytes of memory.", size));
  return p;
}

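// Allocates aligned host memory. `*index` records whether the block was
// locked into physical memory (1 if FLAGS_use_pinned_memory triggered
// mlock/VirtualLock, 0 otherwise) so that Free() can undo the lock.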
void* CPUAllocator::Alloc(size_t* index, size_t size) {
  // According to http://www.cplusplus.com/reference/cstdlib/malloc/,
  // malloc might not return nullptr if size is zero, but the returned
  // pointer shall not be dereferenced -- so we make it nullptr.
  if (size <= 0) return nullptr;

  *index = 0;  // unlock memory

  void* p = AlignedMalloc(size);

  if (p != nullptr) {
    if (FLAGS_use_pinned_memory) {
      *index = 1;
#ifdef _WIN32
      VirtualLock(p, size);
#else
      mlock(p, size);  // lock memory
#endif
    }
  }

  return p;
}

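// Releases a block returned by CPUAllocator::Alloc; `index == 1` means the
// block was locked at allocation time and must be unlocked before freeing.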
void CPUAllocator::Free(void* p, size_t size, size_t index) {
  if (p != nullptr && index == 1) {
#ifdef _WIN32
    VirtualUnlock(p, size);
#else
    munlock(p, size);
#endif
  }
#ifdef _WIN32
  _aligned_free(p);
#else
  free(p);
#endif
}

bool CPUAllocator::UseGpu() const { return false; }

#ifdef PADDLE_WITH_CUDA

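// Allocates `size` bytes of device memory on gpu_id_ via
// platform::RecordedCudaMalloc and tracks the running total in
// gpu_alloc_size_; on failure it throws an out-of-memory error that reports
// the remaining memory and suggests tuning flags.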
void* GPUAllocator::Alloc(size_t* index, size_t size) {
  // CUDA documentation doesn't explain if cudaMalloc returns nullptr
  // if size is 0.  We just make sure it does.
  if (size <= 0) return nullptr;

  void* p;
  auto result = platform::RecordedCudaMalloc(&p, size, gpu_id_);

  if (result == cudaSuccess) {
    *index = 0;
    gpu_alloc_size_ += size;
    return p;
  } else {
    size_t avail, total, actual_avail, actual_total;
    bool is_limited = platform::RecordedCudaMemGetInfo(
        &avail, &total, &actual_avail, &actual_total, gpu_id_);

    std::string err_msg;
    if (is_limited) {
      auto limit_size = (total >> 20);
      err_msg = string::Sprintf(
          "\n   3) Set environment variable `FLAGS_gpu_memory_limit_mb` to a "
          "larger value. Currently `FLAGS_gpu_memory_limit_mb` is %d, so the "
          "maximum GPU memory usage is limited to %d MB.\n"
          "      The command is `export FLAGS_gpu_memory_limit_mb=xxx`.",
          limit_size, limit_size);
    }

    PADDLE_THROW_BAD_ALLOC(platform::errors::ResourceExhausted(
        "\n\nOut of memory error on GPU %d. "
        "Cannot allocate %s memory on GPU %d, "
        "available memory is only %s.\n\n"
        "Please check whether there is any other process using GPU %d.\n"
        "1. If yes, please stop them, or start PaddlePaddle on another GPU.\n"
        "2. If no, please try one of the following suggestions:\n"
        "   1) Decrease the batch size of your model.\n"
        "   2) FLAGS_fraction_of_gpu_memory_to_use is %.2lf now, "
        "please set it to a higher value but less than 1.0.\n"
        "      The command is "
        "`export FLAGS_fraction_of_gpu_memory_to_use=xxx`.%s\n\n",
        gpu_id_, string::HumanReadableSize(size), gpu_id_,
        string::HumanReadableSize(avail), gpu_id_,
        FLAGS_fraction_of_gpu_memory_to_use, err_msg));
  }
}

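// Frees device memory previously returned by GPUAllocator::Alloc and keeps
// the gpu_alloc_size_ bookkeeping consistent.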
void GPUAllocator::Free(void* p, size_t size, size_t index) {
  PADDLE_ENFORCE_EQ(index, 0, platform::errors::InvalidArgument(
                                  "The index should be 0, but got %d", index));
  PADDLE_ENFORCE_GE(gpu_alloc_size_, size,
                    platform::errors::InvalidArgument(
                        "The size of memory (%d) to free exceeds the size of "
                        "allocated gpu memory (%d)",
                        size, gpu_alloc_size_));
  gpu_alloc_size_ -= size;

  platform::RecordedCudaFree(p, size, gpu_id_);
}

bool GPUAllocator::UseGpu() const { return true; }

// PINNED memory allows direct DMA transfers by the GPU to and from system
// memory. It's locked to a physical address.
void* CUDAPinnedAllocator::Alloc(size_t* index, size_t size) {
  if (size <= 0) return nullptr;

  // NOTE: here, we use CUDAPinnedMaxAllocSize as the maximum memory size
  // of host pinned allocation. Allocating too much would reduce
  // the amount of memory available to the underlying system for paging.
  size_t usable =
      paddle::platform::CUDAPinnedMaxAllocSize() - cuda_pinnd_alloc_size_;

  if (size > usable) {
    LOG(WARNING) << "Cannot malloc " << size / 1024.0 / 1024.0
                 << " MB pinned memory"
                 << ", available " << usable / 1024.0 / 1024.0 << " MB";
    return nullptr;
  }

  void* p;
  // PINNED memory is visible to all CUDA contexts.
  cudaError_t result = cudaHostAlloc(&p, size, cudaHostAllocPortable);

  if (result == cudaSuccess) {
    *index = 1;  // PINNED memory
    cuda_pinnd_alloc_size_ += size;
    return p;
  } else {
    LOG(WARNING) << "cudaHostAlloc failed.";
    return nullptr;
  }

  return nullptr;
}

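// Frees pinned host memory allocated by cudaHostAlloc.
// cudaErrorCudartUnloading is tolerated because it can only happen while the
// process is shutting down.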
void CUDAPinnedAllocator::Free(void* p, size_t size, size_t index) {
  cudaError_t err;
  PADDLE_ENFORCE_EQ(index, 1, platform::errors::InvalidArgument(
                                  "The index should be 1, but got %d", index));

  PADDLE_ENFORCE_GE(cuda_pinnd_alloc_size_, size,
                    platform::errors::InvalidArgument(
                        "The size of memory (%d) to free exceeds the size of "
                        "allocated cuda pinned memory (%d)",
                        size, cuda_pinnd_alloc_size_));
  cuda_pinnd_alloc_size_ -= size;
  err = cudaFreeHost(p);

  // Purposefully allow cudaErrorCudartUnloading, because
  // that is returned if you ever call cudaFreeHost after the
  // driver has already shut down. This happens only if the
  // process is terminating, in which case we don't care if
  // cudaFreeHost succeeds.
  if (err != cudaErrorCudartUnloading) {
    PADDLE_ENFORCE_EQ(
        err, 0,
        platform::errors::Fatal(
            "cudaFreeHost failed in GPUPinnedAllocator, error code is %d",
            err));
  }
}

bool CUDAPinnedAllocator::UseGpu() const { return false; }

#endif

}  // namespace detail
}  // namespace memory
}  // namespace paddle