/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#define GLOG_NO_ABBREVIATED_SEVERITIES

#include "paddle/fluid/memory/detail/system_allocator.h"

#include "paddle/fluid/memory/stats.h"

#ifdef _WIN32
#include <malloc.h>
#ifndef NOMINMAX
#define NOMINMAX  // msvc max/min macro conflict with std::min/max
#endif
#include <windows.h>  // VirtualLock/VirtualUnlock
#else
#include <sys/mman.h>  // for mlock and munlock
#endif
#include "gflags/gflags.h"
#include "paddle/fluid/memory/allocation/allocator.h"
#include "paddle/fluid/platform/cpu_info.h"
#include "paddle/fluid/platform/device/gpu/gpu_info.h"
#include "paddle/fluid/platform/device/npu/npu_info.h"
#include "paddle/fluid/platform/enforce.h"
#ifdef PADDLE_WITH_MLU
#include "paddle/fluid/platform/device/mlu/mlu_info.h"
#endif

#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
#include "paddle/fluid/platform/cuda_device_guard.h"
#endif

#include "paddle/fluid/platform/device/device_wrapper.h"

DECLARE_bool(use_pinned_memory);
DECLARE_double(fraction_of_gpu_memory_to_use);
DECLARE_uint64(initial_gpu_memory_in_mb);
DECLARE_uint64(reallocate_gpu_memory_in_mb);
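// These DECLARE_* macros reference gflags defined elsewhere in Paddle:
// FLAGS_use_pinned_memory controls whether CPU allocations are additionally
// mlock'ed (see CPUAllocator::Alloc below), and the remaining flags govern
// how the GPU memory pool is sized.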

namespace paddle {
namespace memory {
namespace detail {

void* AlignedMalloc(size_t size) {
  void* p = nullptr;
  size_t alignment = 32ul;
#ifdef PADDLE_WITH_MKLDNN
  // refer to https://github.com/01org/mkl-dnn/blob/master/include/dnnl.hpp
  // memory alignment
  alignment = 4096ul;
#endif
#ifdef _WIN32
  p = _aligned_malloc(size, alignment);
#else
  int error = posix_memalign(&p, alignment, size);
  PADDLE_ENFORCE_EQ(
      error, 0,
      platform::errors::ResourceExhausted(
          "Fail to alloc memory of %ld size, error code is %d.", size, error));
#endif
  PADDLE_ENFORCE_NOT_NULL(p, platform::errors::ResourceExhausted(
                                 "Fail to alloc memory of %ld size.", size));
  return p;
}

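// Convention used by every allocator in this file: the `index` out-parameter
// records how a block was obtained so that Free() can release it the same
// way. For CPUAllocator, 0 means plain aligned memory and 1 means the pages
// were additionally locked with mlock/VirtualLock.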
void* CPUAllocator::Alloc(size_t* index, size_t size) {
  // According to http://www.cplusplus.com/reference/cstdlib/malloc/,
  // malloc might not return nullptr if size is zero, but the returned
  // pointer shall not be dereferenced -- so we make it nullptr.
  if (size <= 0) return nullptr;

  *index = 0;  // unlock memory

  void* p = AlignedMalloc(size);

  if (p != nullptr) {
    if (FLAGS_use_pinned_memory) {
      *index = 1;
#ifdef _WIN32
      VirtualLock(p, size);
#else
      mlock(p, size);  // lock memory
#endif
    }
  }

  HOST_MEMORY_STAT_UPDATE(Reserved, 0, size);

  return p;
}

void CPUAllocator::Free(void* p, size_t size, size_t index) {
  if (p != nullptr && index == 1) {
#ifdef _WIN32
    VirtualUnlock(p, size);
#else
    munlock(p, size);
#endif
  }
#ifdef _WIN32
  _aligned_free(p);
#else
  free(p);
#endif

  HOST_MEMORY_STAT_UPDATE(Reserved, 0, -size);
}

bool CPUAllocator::UseGpu() const { return false; }
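// A minimal usage sketch (hypothetical caller; inside Paddle these system
// allocators are normally driven by the buddy allocator rather than called
// directly):
//
//   CPUAllocator a;
//   size_t index = 0;
//   void* p = a.Alloc(&index, 1 << 20);  // 1 MB; index records lock state
//   // ... use p ...
//   a.Free(p, 1 << 20, index);  // pass back the same size and index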

#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)

void* GPUAllocator::Alloc(size_t* index, size_t size) {
  // CUDA documentation doesn't explain if cudaMalloc returns nullptr
  // if size is 0.  We just make sure it does.
  if (size <= 0) return nullptr;

  void* p;
  auto result = platform::RecordedGpuMalloc(&p, size, gpu_id_);

  if (result == gpuSuccess) {
    *index = 0;
    gpu_alloc_size_ += size;
    return p;
  } else {
    size_t avail, total, actual_avail, actual_total;
    bool is_limited = platform::RecordedGpuMemGetInfo(
        &avail, &total, &actual_avail, &actual_total, gpu_id_);
    size_t allocated = total - avail;

    std::string err_msg;
    if (is_limited) {
      auto limit_size = (total >> 20);
      err_msg = string::Sprintf(
          "\n   3) Set environment variable `FLAGS_gpu_memory_limit_mb` to a "
          "larger value. Currently `FLAGS_gpu_memory_limit_mb` is %d, so the "
          "maximum GPU memory usage is limited to %d MB.\n"
          "      The command is `export FLAGS_gpu_memory_limit_mb=xxx`.",
          limit_size, limit_size);
    }

    PADDLE_THROW_BAD_ALLOC(platform::errors::ResourceExhausted(
        "\n\nOut of memory error on GPU %d. "
        "Cannot allocate %s memory on GPU %d, %s memory has been allocated and "
        "available memory is only %s.\n\n"
        "Please check whether there is any other process using GPU %d.\n"
        "1. If yes, please stop them, or start PaddlePaddle on another GPU.\n"
        "2. If no, please try one of the following suggestions:\n"
        "   1) Decrease the batch size of your model.\n"
        "   2) FLAGS_fraction_of_gpu_memory_to_use is %.2lf now, "
        "please set it to a higher value but less than 1.0.\n"
        "      The command is "
        "`export FLAGS_fraction_of_gpu_memory_to_use=xxx`.%s\n\n",
        gpu_id_, string::HumanReadableSize(size), gpu_id_,
        string::HumanReadableSize(allocated), string::HumanReadableSize(avail),
        gpu_id_, FLAGS_fraction_of_gpu_memory_to_use, err_msg));
  }
}

void GPUAllocator::Free(void* p, size_t size, size_t index) {
  PADDLE_ENFORCE_EQ(index, 0,
                    platform::errors::InvalidArgument(
                        "The index should be 0, index is %d", index));
  PADDLE_ENFORCE_GE(gpu_alloc_size_, size,
                    platform::errors::InvalidArgument(
                        "The size of memory (%d) to free exceeds the size of "
                        "allocated gpu memory (%d)",
                        size, gpu_alloc_size_));
  gpu_alloc_size_ -= size;

  platform::RecordedGpuFree(p, size, gpu_id_);
}

bool GPUAllocator::UseGpu() const { return true; }
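// Note: RecordedGpuMalloc/RecordedGpuFree wrap the raw device malloc/free and
// keep a per-device byte count, which is how RecordedGpuMemGetInfo above can
// distinguish the real device capacity from a FLAGS_gpu_memory_limit_mb cap.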

// PINNED memory allows direct DMA transfers by the GPU to and from system
// memory. It’s locked to a physical address.
void* CUDAPinnedAllocator::Alloc(size_t* index, size_t size) {
  if (size <= 0) return nullptr;

  // NOTE: here, we use CUDAPinnedMaxAllocSize as the maximum memory size
  // of host pinned allocation. Allocating too much would reduce
  // the amount of memory available to the underlying system for paging.
  size_t usable =
      paddle::platform::CUDAPinnedMaxAllocSize() - cuda_pinnd_alloc_size_;

  if (size > usable) {
    LOG(WARNING) << "Cannot malloc " << size / 1024.0 / 1024.0
                 << " MB pinned memory."
                 << ", available " << usable / 1024.0 / 1024.0 << " MB";
    return nullptr;
  }

  void* p;
// PINNED memory is visible to all CUDA contexts.
#ifdef PADDLE_WITH_HIP
  hipError_t result = hipHostMalloc(&p, size, hipHostMallocPortable);
#else
  cudaError_t result = cudaHostAlloc(&p, size, cudaHostAllocPortable);
#endif

  if (result == gpuSuccess) {
    *index = 1;  // PINNED memory
    cuda_pinnd_alloc_size_ += size;
    HOST_MEMORY_STAT_UPDATE(Reserved, 0, size);
    return p;
  } else {
    LOG(WARNING) << "cudaHostAlloc failed.";
    return nullptr;
  }

  return nullptr;
}

void CUDAPinnedAllocator::Free(void* p, size_t size, size_t index) {
  gpuError_t err;
  PADDLE_ENFORCE_EQ(index, 1,
                    platform::errors::InvalidArgument(
                        "The index should be 1, but got %d", index));

  PADDLE_ENFORCE_GE(cuda_pinnd_alloc_size_, size,
                    platform::errors::InvalidArgument(
                        "The size of memory (%d) to free exceeds the size of "
                        "allocated cuda pinned memory (%d)",
                        size, cuda_pinnd_alloc_size_));
  cuda_pinnd_alloc_size_ -= size;
#ifdef PADDLE_WITH_HIP
  err = hipHostFree(p);
  if (err != hipErrorDeinitialized) {
    PADDLE_ENFORCE_EQ(
        err, hipSuccess,
        platform::errors::Fatal(
            "hipFreeHost failed in GPUPinnedAllocator, error code is %d", err));
  }
#else
  err = cudaFreeHost(p);

  // Purposefully allow cudaErrorCudartUnloading, because
  // that is returned if you ever call cudaFreeHost after the
  // driver has already shutdown. This happens only if the
  // process is terminating, in which case we don't care if
  // cudaFreeHost succeeds.
  if (err != cudaErrorCudartUnloading) {
    PADDLE_ENFORCE_EQ(
        err, 0,
        platform::errors::Fatal(
            "cudaFreeHost failed in GPUPinnedAllocator, error code is %d",
            err));
  }
#endif
  HOST_MEMORY_STAT_UPDATE(Reserved, 0, -size);
}

bool CUDAPinnedAllocator::UseGpu() const { return false; }
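// Unlike GPUAllocator::Alloc, which throws PADDLE_THROW_BAD_ALLOC on failure,
// the pinned allocator logs a warning and returns nullptr, leaving any
// fallback decision to the caller.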

#endif

#ifdef PADDLE_WITH_ASCEND_CL
void* NPUAllocator::Alloc(size_t* index, size_t size) {
  if (size <= 0) return nullptr;

  void* p;
  auto result = platform::RecordedNPUMalloc(&p, size, npu_id_);

  if (result == ACL_ERROR_NONE) {
    *index = 0;
    npu_alloc_size_ += size;
    return p;
  } else {
    size_t avail, total, actual_avail, actual_total;
    bool is_limited = platform::RecordedNPUMemGetInfo(
        &avail, &total, &actual_avail, &actual_total, npu_id_);

    std::string err_msg;
    if (is_limited) {
      auto limit_size = (total >> 20);
      err_msg = string::Sprintf(
          "\n   3) Set environment variable `FLAGS_gpu_memory_limit_mb` to a "
          "larger value. Currently `FLAGS_gpu_memory_limit_mb` is %d, so the "
          "maximum GPU memory usage is limited to %d MB.\n"
          "      The command is `export FLAGS_gpu_memory_limit_mb=xxx`.",
          limit_size, limit_size);
    }

    PADDLE_THROW_BAD_ALLOC(platform::errors::ResourceExhausted(
        "\n\nOut of memory error on NPU %d. "
        "Cannot allocate %s memory on NPU %d, "
        "available memory is only %s.\n\n"
        "Please check whether there is any other process using NPU %d.\n"
        "1. If yes, please stop them, or start PaddlePaddle on another NPU.\n"
        "2. If no, please try one of the following suggestions:\n"
        "   1) Decrease the batch size of your model.\n"
        "   2) FLAGS_fraction_of_gpu_memory_to_use is %.2lf now, "
        "please set it to a higher value but less than 1.0.\n"
        "      The command is "
        "`export FLAGS_fraction_of_gpu_memory_to_use=xxx`.%s\n\n",
        npu_id_, string::HumanReadableSize(size), npu_id_,
        string::HumanReadableSize(avail), npu_id_,
        FLAGS_fraction_of_gpu_memory_to_use, err_msg));
  }
}

void NPUAllocator::Free(void* p, size_t size, size_t index) {
  VLOG(4) << "Free " << p << " size " << size;
  PADDLE_ENFORCE_EQ(index, 0,
                    platform::errors::InvalidArgument(
                        "The index should be 0, index is %d", index));
  PADDLE_ENFORCE_GE(npu_alloc_size_, size,
                    platform::errors::InvalidArgument(
                        "The size of memory (%d) to free exceeds the size of "
                        "allocated gpu memory (%d)",
                        size, npu_alloc_size_));
  npu_alloc_size_ -= size;

  platform::RecordedNPUFree(p, size, npu_id_);
}

bool NPUAllocator::UseGpu() const { return true; }
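// UseGpu() effectively means "is device-side memory": the NPU (and, below,
// the MLU and custom-device) allocators also return true so that callers
// treat their blocks as device memory.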

void* NPUPinnedAllocator::Alloc(size_t* index, size_t size) {
  if (size <= 0) return nullptr;

  size_t usable =
      paddle::platform::NPUPinnedMaxAllocSize() - npu_pinnd_alloc_size_;

  if (size > usable) {
    LOG(WARNING) << "Cannot malloc " << size / 1024.0 / 1024.0
                 << " MB pinned memory."
                 << ", available " << usable / 1024.0 / 1024.0 << " MB";
    return nullptr;
  }

  void* p;
  // PINNED memory is visible to all NPU contexts.
  auto result = platform::NPUHostMalloc(&p, size);

  if (result == ACL_ERROR_NONE) {
    *index = 1;  // PINNED memory
    npu_pinnd_alloc_size_ += size;
    return p;
  } else {
    LOG(WARNING) << "NPUHostMalloc failed.";
    return nullptr;
  }

  return nullptr;
}

void NPUPinnedAllocator::Free(void* p, size_t size, size_t index) {
  aclError err;
  PADDLE_ENFORCE_EQ(index, 1,
                    platform::errors::InvalidArgument(
                        "The index should be 1, but got %d", index));

  PADDLE_ENFORCE_GE(npu_pinnd_alloc_size_, size,
                    platform::errors::InvalidArgument(
                        "The size of memory (%d) to free exceeds the size of "
                        "allocated npu pinned memory (%d)",
                        size, npu_pinnd_alloc_size_));
  npu_pinnd_alloc_size_ -= size;
  err = platform::NPUHostFree(p);

  if (err != ACL_ERROR_NONE) {
    PADDLE_ENFORCE_EQ(
        err, 0,
        platform::errors::Fatal(
            "NPUHostFree failed in NPUPinnedAllocator, error code is %d", err));
  }
}

bool NPUPinnedAllocator::UseGpu() const { return false; }

#endif

#ifdef PADDLE_WITH_MLU
void* MLUAllocator::Alloc(size_t* index, size_t size) {
  if (size <= 0) return nullptr;

  void* p;
  auto result = platform::RecordedMLUMalloc(&p, size, mlu_id_);

  if (result == cnrtSuccess) {
    *index = 0;
    mlu_alloc_size_ += size;
    return p;
  } else {
    size_t avail, total, actual_avail, actual_total;
    bool is_limited = platform::RecordedMLUMemGetInfo(
        &avail, &total, &actual_avail, &actual_total, mlu_id_);
    size_t allocated = total - avail;

    std::string err_msg;
    if (is_limited) {
      auto limit_size = (total >> 20);
      err_msg = string::Sprintf(
          "\n   3) Set environment variable `FLAGS_gpu_memory_limit_mb` to a "
          "larger value. Currently `FLAGS_gpu_memory_limit_mb` is %d, so the "
          "maximum MLU memory usage is limited to %d MB.\n"
          "      The command is `export FLAGS_gpu_memory_limit_mb=xxx`.",
          limit_size, limit_size);
    }

    PADDLE_THROW_BAD_ALLOC(platform::errors::ResourceExhausted(
        "\n\nOut of memory error on MLU %d. "
        "Cannot allocate %s memory on MLU %d, %s memory has been allocated and "
        "available memory is only %s.\n\n"
        "Please check whether there is any other process using MLU %d.\n"
        "1. If yes, please stop them, or start PaddlePaddle on another MLU.\n"
        "2. If no, please try one of the following suggestions:\n"
        "   1) Decrease the batch size of your model.\n"
        "   2) FLAGS_fraction_of_gpu_memory_to_use is %.2lf now, "
        "please set it to a higher value but less than 1.0.\n"
        "      The command is "
        "`export FLAGS_fraction_of_gpu_memory_to_use=xxx`.%s\n\n",
        mlu_id_, string::HumanReadableSize(size), mlu_id_,
        string::HumanReadableSize(allocated), string::HumanReadableSize(avail),
        mlu_id_, FLAGS_fraction_of_gpu_memory_to_use, err_msg));
  }
}

void MLUAllocator::Free(void* p, size_t size, size_t index) {
  PADDLE_ENFORCE_EQ(index, 0,
                    platform::errors::InvalidArgument(
                        "The index should be 0, index is %d", index));
  PADDLE_ENFORCE_GE(mlu_alloc_size_, size,
                    platform::errors::InvalidArgument(
                        "The size of memory (%d) to free exceeds the size of "
                        "allocated gpu memory (%d)",
                        size, mlu_alloc_size_));
  mlu_alloc_size_ -= size;

  platform::RecordedMLUFree(p, size, mlu_id_);
}

bool MLUAllocator::UseGpu() const { return true; }
#endif

#ifdef PADDLE_WITH_CUSTOM_DEVICE
void* CustomAllocator::Alloc(size_t* index, size_t size) {
  if (size <= 0) return nullptr;

  void* p;
  auto place = platform::CustomPlace(dev_type_, dev_id_);
  auto device = phi::DeviceManager::GetDeviceWithPlace(place);
  p = device->MemoryAllocate(size);
  if (LIKELY(p)) {
    VLOG(4) << "CustomAllocator::Alloc " << p << " size " << size;
    *index = 0;
    plug_alloc_size += size;
  } else {
    size_t avail, total;

    phi::DeviceManager::MemoryStats(place, &total, &avail);
    PADDLE_THROW_BAD_ALLOC(platform::errors::ResourceExhausted(
        "\n\nOut of memory error on %s %d. "
        "total memory is %s, used memory is %s, "
        "available memory is only %s.\n\n",
        dev_type_, dev_id_, string::HumanReadableSize(total),
        string::HumanReadableSize(total - avail),
        string::HumanReadableSize(avail)));
  }
  return p;
}

void CustomAllocator::Free(void* p, size_t size, size_t index) {
  VLOG(4) << "CustomAllocator::Free " << p << " size " << size;
  PADDLE_ENFORCE_EQ(index, 0,
                    platform::errors::InvalidArgument(
                        "The index should be 0, index is %d", index));
  PADDLE_ENFORCE_GE(plug_alloc_size, size,
                    platform::errors::InvalidArgument(
                        "The size of memory (%d) to free exceeds the size of "
                        "allocated gpu memory (%d)",
                        size, plug_alloc_size));
  plug_alloc_size -= size;
  auto place = platform::CustomPlace(dev_type_, dev_id_);
  auto device = phi::DeviceManager::GetDeviceWithPlace(place);
  device->MemoryDeallocate(p, size);
}

bool CustomAllocator::UseGpu() const { return true; }
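// CustomAllocator routes through the pluggable device layer
// (phi::DeviceManager), so the actual allocation primitive is supplied by the
// custom-device plugin rather than hard-coded here.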
#endif

}  // namespace detail
}  // namespace memory
}  // namespace paddle