allocator_facade.cc
// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/fluid/memory/allocation/allocator_facade.h"

#include "gflags/gflags.h"
#include "paddle/fluid/memory/allocation/aligned_allocator.h"
#include "paddle/fluid/memory/allocation/allocator.h"
#include "paddle/fluid/memory/allocation/allocator_strategy.h"
#include "paddle/fluid/memory/allocation/auto_growth_best_fit_allocator.h"
#include "paddle/fluid/memory/allocation/cpu_allocator.h"
#include "paddle/fluid/memory/allocation/naive_best_fit_allocator.h"
#include "paddle/fluid/memory/allocation/retry_allocator.h"
#include "paddle/fluid/platform/enforce.h"
#include "paddle/fluid/platform/place.h"

#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
#include "paddle/fluid/memory/allocation/cuda_allocator.h"
#include "paddle/fluid/memory/allocation/pinned_allocator.h"
#include "paddle/fluid/memory/allocation/stream_safe_cuda_allocator.h"
#include "paddle/fluid/memory/allocation/thread_local_allocator.h"
#include "paddle/fluid/platform/device/gpu/gpu_info.h"
#include "paddle/fluid/platform/device_context.h"

#ifdef PADDLE_WITH_CUDA
#include "paddle/fluid/platform/device/gpu/cuda/cuda_graph.h"
#endif

#if CUDA_VERSION >= 10020
#include "paddle/fluid/memory/allocation/cuda_virtual_mem_allocator.h"
#include "paddle/fluid/memory/allocation/virtual_memory_auto_growth_best_fit_allocator.h"
#include "paddle/fluid/platform/dynload/cuda_driver.h"
#endif
#endif

#ifdef PADDLE_WITH_XPU
#include "paddle/fluid/platform/device/xpu/xpu_info.h"
#endif

#ifdef PADDLE_WITH_ASCEND_CL
#include "paddle/fluid/memory/allocation/npu_pinned_allocator.h"
#endif

#ifdef PADDLE_WITH_IPU
#include "paddle/fluid/platform/device/ipu/ipu_info.h"
#endif

PADDLE_DEFINE_EXPORTED_int64(
    gpu_allocator_retry_time, 10000,
    "The retry time (milliseconds) when allocator fails "
    "to allocate memory. No retry if this value is not greater than 0");

PADDLE_DEFINE_EXPORTED_bool(
    use_system_allocator, false,
    "Whether to use system allocator to allocate CPU and GPU memory. "
    "Only used for unittests.");

PADDLE_DEFINE_EXPORTED_bool(use_virtual_memory_auto_growth, false,
                            "Use VirtualMemoryAutoGrowthBestFitAllocator.");

// NOTE(Ruibiao): This flag exists only for compatibility with the old
// single-stream CUDA allocator. It will be removed after
// StreamSafeCudaAllocator has been fully tested.
PADDLE_DEFINE_EXPORTED_bool(use_stream_safe_cuda_allocator, false,
                            "Enable StreamSafeCUDAAllocator");

DECLARE_string(allocator_strategy);

namespace paddle {
namespace memory {
namespace allocation {

#ifdef PADDLE_WITH_CUDA
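// Wraps an underlying allocator while a CUDA Graph is being captured. Each
// allocation keeps a shared reference back to this allocator, so the graph's
// memory pool stays alive for as long as any allocation made during capture
// is still in use.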
class CUDAGraphAllocator
    : public Allocator,
      public std::enable_shared_from_this<CUDAGraphAllocator> {
 private:
  class PrivateAllocation : public Allocation {
   public:
    PrivateAllocation(CUDAGraphAllocator* allocator,
                      AllocationPtr underlying_allocation)
        : Allocation(
              underlying_allocation->ptr(), underlying_allocation->base_ptr(),
              underlying_allocation->size(), underlying_allocation->place()),
          allocator_(allocator->shared_from_this()),
          underlying_allocation_(std::move(underlying_allocation)) {}

   private:
    std::shared_ptr<Allocator> allocator_;
    AllocationPtr underlying_allocation_;
  };

  explicit CUDAGraphAllocator(const std::shared_ptr<Allocator>& allocator)
      : underlying_allocator_(allocator) {}

 public:
  static std::shared_ptr<Allocator> Create(
      const std::shared_ptr<Allocator>& allocator) {
    return std::shared_ptr<Allocator>(new CUDAGraphAllocator(allocator));
  }

 protected:
  Allocation* AllocateImpl(size_t size) {
    VLOG(10) << "Allocate " << size << " for CUDA Graph";
    return new PrivateAllocation(this, underlying_allocator_->Allocate(size));
  }

  void FreeImpl(Allocation* allocation) {
    VLOG(10) << "delete for CUDA Graph";
    delete allocation;
  }

 private:
  std::shared_ptr<Allocator> underlying_allocator_;
};
#endif

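// Pimpl class behind AllocatorFacade: owns every allocator instance and
// dispatches requests by place, by requested size (zero-size requests get
// dummy allocators), and, on the stream-safe CUDA path, by stream.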
class AllocatorFacadePrivate {
 public:
  using AllocatorMap = std::map<platform::Place, std::shared_ptr<Allocator>>;

#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
  using CUDAAllocatorMap =
      std::map<platform::CUDAPlace,
               std::map<gpuStream_t, std::shared_ptr<Allocator>>>;
#endif

  explicit AllocatorFacadePrivate(bool allow_free_idle_chunk = true) {
    strategy_ = GetAllocatorStrategy();
    switch (strategy_) {
      case AllocatorStrategy::kNaiveBestFit: {
        InitNaiveBestFitCPUAllocator();
#ifdef PADDLE_WITH_IPU
        for (int dev_id = 0; dev_id < platform::GetIPUDeviceCount(); ++dev_id) {
          InitNaiveBestFitIPUAllocator(platform::IPUPlace(dev_id));
        }
#endif
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
        if (FLAGS_use_stream_safe_cuda_allocator) {
          LOG(WARNING) << "FLAGS_use_stream_safe_cuda_allocator is invalid for "
                          "naive_best_fit strategy";
          FLAGS_use_stream_safe_cuda_allocator = false;
        }
        for (int dev_id = 0; dev_id < platform::GetGPUDeviceCount(); ++dev_id) {
          InitNaiveBestFitCUDAAllocator(platform::CUDAPlace(dev_id));
        }
        InitNaiveBestFitCUDAPinnedAllocator();
#endif
#ifdef PADDLE_WITH_XPU
        for (int dev_id = 0; dev_id < platform::GetXPUDeviceCount(); ++dev_id) {
          InitNaiveBestFitXPUAllocator(platform::XPUPlace(dev_id));
        }
#endif
#ifdef PADDLE_WITH_ASCEND_CL
        for (int dev_id = 0; dev_id < platform::GetNPUDeviceCount(); ++dev_id) {
          InitNaiveBestFitNPUAllocator(platform::NPUPlace(dev_id));
        }
        InitNaiveBestFitNPUPinnedAllocator();
#endif
        break;
      }

      case AllocatorStrategy::kAutoGrowth: {
        InitNaiveBestFitCPUAllocator();
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
        allow_free_idle_chunk_ = allow_free_idle_chunk;
        if (FLAGS_use_stream_safe_cuda_allocator) {
          default_streams_ =
              std::vector<gpuStream_t>(platform::GetGPUDeviceCount(), nullptr);
          // TODO(Ruibiao): Support multi-stream allocator for other strategies
          for (int dev_id = 0; dev_id < platform::GetGPUDeviceCount();
               ++dev_id) {
            InitStreamSafeCUDAAllocator(platform::CUDAPlace(dev_id), nullptr);
          }
        } else {
          for (int dev_id = 0; dev_id < platform::GetGPUDeviceCount();
               ++dev_id) {
            InitAutoGrowthCUDAAllocator(platform::CUDAPlace(dev_id),
                                        allow_free_idle_chunk_);
          }
        }
        InitNaiveBestFitCUDAPinnedAllocator();
#endif
#ifdef PADDLE_WITH_XPU
        for (int dev_id = 0; dev_id < platform::GetXPUDeviceCount(); ++dev_id) {
          InitNaiveBestFitXPUAllocator(platform::XPUPlace(dev_id));
        }
#endif
#ifdef PADDLE_WITH_IPU
        for (int dev_id = 0; dev_id < platform::GetIPUDeviceCount(); ++dev_id) {
          InitNaiveBestFitIPUAllocator(platform::IPUPlace(dev_id));
        }
#endif
        break;
      }

      case AllocatorStrategy::kThreadLocal: {
        InitNaiveBestFitCPUAllocator();
#ifdef PADDLE_WITH_XPU
        for (int dev_id = 0; dev_id < platform::GetXPUDeviceCount(); ++dev_id) {
          InitNaiveBestFitXPUAllocator(platform::XPUPlace(dev_id));
        }
#endif
#ifdef PADDLE_WITH_IPU
        for (int dev_id = 0; dev_id < platform::GetIPUDeviceCount(); ++dev_id) {
          InitNaiveBestFitIPUAllocator(platform::IPUPlace(dev_id));
        }
#endif
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
        if (FLAGS_use_stream_safe_cuda_allocator) {
          LOG(WARNING) << "FLAGS_use_stream_safe_cuda_allocator is invalid for "
                          "thread_local strategy";
          FLAGS_use_stream_safe_cuda_allocator = false;
        }

        for (int dev_id = 0; dev_id < platform::GetGPUDeviceCount(); ++dev_id) {
          InitThreadLocalCUDAAllocator(platform::CUDAPlace(dev_id));
        }
        InitNaiveBestFitCUDAPinnedAllocator();
#endif
        break;
      }

      default: {
        PADDLE_THROW(platform::errors::InvalidArgument(
            "Unsupported allocator strategy: %d", static_cast<int>(strategy_)));
      }
    }
    InitZeroSizeAllocators();
    InitSystemAllocators();

    if (FLAGS_gpu_allocator_retry_time > 0) {
      WrapCUDARetryAllocator(FLAGS_gpu_allocator_retry_time);
    }

    CheckAllocThreadSafe();
  }

  inline const std::shared_ptr<Allocator>& GetAllocator(
      const platform::Place& place, size_t size) {
    VLOG(6) << "GetAllocator"
            << " " << place << " " << size;
    const auto& allocators =
        (size > 0 ? (UNLIKELY(FLAGS_use_system_allocator) ? system_allocators_
                                                          : GetAllocatorMap())
                  : zero_size_allocators_);
    auto iter = allocators.find(place);
    PADDLE_ENFORCE_NE(iter, allocators.end(),
                      platform::errors::NotFound(
                          "No allocator found for the place %s", place));
    return iter->second;
  }

#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
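  // Looks up the allocator registered for the given (place, stream) pair;
  // when create_if_not_found is true, a missing allocator is initialized on
  // demand instead of raising an error.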
  const std::shared_ptr<Allocator>& GetAllocator(
      const platform::CUDAPlace& place, const gpuStream_t& stream,
      bool create_if_not_found = false) {
    auto place_it = cuda_allocators_.find(place);
    PADDLE_ENFORCE_NE(place_it, cuda_allocators_.end(),
                      platform::errors::NotFound(
                          "No allocator found for the place %s", place));

    const std::map<gpuStream_t, std::shared_ptr<Allocator>>& allocator_map =
        place_it->second;
    auto stream_it = allocator_map.find(stream);
    if (stream_it == allocator_map.end()) {
      if (create_if_not_found) {
        InitStreamSafeCUDAAllocator(place, stream);
        return cuda_allocators_[place][stream];
      } else {
        PADDLE_THROW(platform::errors::NotFound(
            "No allocator found for stream %s in place %s", stream, place));
      }
    }
    return stream_it->second;
  }

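  // Lazily resolves the default stream of a device via the DeviceContextPool
  // and ensures a stream-safe allocator has been created for it.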
  const gpuStream_t& GetDefaultStream(const platform::CUDAPlace& place) {
    int dev_id = place.GetDeviceId();
    gpuStream_t& default_stream = default_streams_[dev_id];
    if (UNLIKELY(default_stream == nullptr)) {
      /* NOTE(Ruibiao): If we set default_stream here via "default_stream =
       * platform::stream::get_current_stream(place.GetDeviceId())->raw_stream()",
       * building the target 'jit_kernel_benchmark' fails with an undefined
       * reference to 'paddle::platform::DeviceContextPool::Get(
       * paddle::platform::Place const&)' in function
       * 'paddle::platform::stream::get_current_stream(int)', although the
       * allocator_facade target itself is not affected. It seems to be a
       * circular dependency between 'cuda_stream' and 'device_context' that
       * causes this strange bug.
       */
      platform::DeviceContextPool& pool =
          platform::DeviceContextPool::Instance();
      default_stream =
          static_cast<platform::CUDADeviceContext*>(pool.Get(place))->stream();
      InitStreamSafeCUDAAllocator(place, default_stream);
    }
    return default_stream;
  }

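  // Marks the allocation as used on the given stream so that the stream-safe
  // allocator defers the actual free until the stream's pending work on it
  // has finished.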
  void RecordStream(std::shared_ptr<Allocation> allocation,
                    const gpuStream_t& stream) {
    if (allocation->size() == 0) {
      return;
    }

    StreamSafeCUDAAllocation* stream_safe_cuda_allocation =
        dynamic_cast<StreamSafeCUDAAllocation*>(allocation.get());
    PADDLE_ENFORCE_NOT_NULL(stream_safe_cuda_allocation,
                            platform::errors::InvalidArgument(
                                "Failed to dynamic cast %p from Allocation* to "
                                "StreamSafeCUDAAllocation*",
                                allocation.get()));
    stream_safe_cuda_allocation->RecordStream(stream);
  }

  const gpuStream_t& GetStream(
      const std::shared_ptr<Allocation>& allocation) const {
    const StreamSafeCUDAAllocation* stream_safe_cuda_allocation =
        dynamic_cast<const StreamSafeCUDAAllocation*>(allocation.get());
    PADDLE_ENFORCE_NOT_NULL(stream_safe_cuda_allocation,
                            platform::errors::InvalidArgument(
                                "Failed to dynamic cast %p from Allocation* to "
                                "StreamSafeCUDAAllocation*",
                                allocation.get()));
    return stream_safe_cuda_allocation->GetOwningStream();
338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358
  }

#ifdef PADDLE_WITH_CUDA
  void PrepareMemoryPoolForCUDAGraph(CUDAGraphID id) {
    PADDLE_ENFORCE_EQ(strategy_, AllocatorStrategy::kAutoGrowth,
                      platform::errors::InvalidArgument(
                          "CUDA Graph is only supported when the "
                          "FLAGS_allocator_strategy=\"auto_growth\", but got "
                          "FLAGS_allocator_strategy=\"%s\"",
                          FLAGS_allocator_strategy));
    auto& allocator = cuda_graph_allocator_map_[id];
    PADDLE_ENFORCE_EQ(
        allocator.get(), nullptr,
        platform::errors::InvalidArgument(
            "The memory pool of the CUDA Graph with ID %d have been prepared.",
            id));
    allocator.reset(
        new AllocatorFacadePrivate(/*allow_free_idle_chunk=*/false));
    for (auto& item : allocator->allocators_) {
      auto& old_allocator = item.second;
      old_allocator = CUDAGraphAllocator::Create(old_allocator);
    }
    VLOG(10) << "Prepare memory pool for CUDA Graph with ID " << id;
  }

  void RemoveMemoryPoolOfCUDAGraph(CUDAGraphID id) {
    auto iter = cuda_graph_allocator_map_.find(id);
    PADDLE_ENFORCE_NE(iter, cuda_graph_allocator_map_.end(),
                      platform::errors::InvalidArgument(
                          "Cannot find CUDA Graph with ID = %d", id));
    cuda_graph_allocator_map_.erase(iter);
    VLOG(10) << "Remove memory pool of CUDA Graph with ID " << id;
  }
#endif
#endif

 private:
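  // Serves zero-size requests with empty (nullptr) allocations so that
  // callers always receive a valid Allocation object without touching real
  // memory.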
  class ZeroSizeAllocator : public Allocator {
   public:
    explicit ZeroSizeAllocator(platform::Place place) : place_(place) {}
    bool IsAllocThreadSafe() const override { return true; }

   protected:
    Allocation* AllocateImpl(size_t size) override {
      return new Allocation(nullptr, 0, place_);
    }
    void FreeImpl(Allocation* allocation) override { delete allocation; }

   private:
    platform::Place place_;
  };

  const AllocatorMap& GetAllocatorMap() {
#ifdef PADDLE_WITH_CUDA
    if (UNLIKELY(platform::CUDAGraph::IsThisThreadCapturing())) {
      auto id = platform::CUDAGraph::CapturingID();
      auto iter = cuda_graph_allocator_map_.find(id);
      PADDLE_ENFORCE_NE(
          iter, cuda_graph_allocator_map_.end(),
          platform::errors::PermissionDenied(
              "No memory pool is prepared for CUDA Graph capturing."));
      VLOG(10) << "Choose CUDA Graph memory pool to allocate memory";
      return iter->second->allocators_;
    } else {
      return allocators_;
    }
#else
    return allocators_;
#endif
  }

  void InitNaiveBestFitCPUAllocator() {
    allocators_[platform::CPUPlace()] =
        std::make_shared<NaiveBestFitAllocator>(platform::CPUPlace());
  }

#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
  void InitNaiveBestFitCUDAPinnedAllocator() {
    allocators_[platform::CUDAPinnedPlace()] =
        std::make_shared<NaiveBestFitAllocator>(platform::CUDAPinnedPlace());
  }

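  // Builds the allocator chain for a (place, stream) pair: an auto-growth
  // allocator wrapped by a stream-safe layer and a retry-on-failure layer,
  // unless another thread has already registered one for this stream.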
  void InitStreamSafeCUDAAllocator(platform::CUDAPlace p, gpuStream_t stream) {
    PADDLE_ENFORCE_EQ(
        strategy_, AllocatorStrategy::kAutoGrowth,
        platform::errors::Unimplemented(
            "Only support auto-growth strategey for StreamSafeCUDAAllocator, "
            "the allocator strategy %d is unsupported for multi-stream",
            static_cast<int>(strategy_)));
    VLOG(9) << "Init CUDA allocator for stream " << stream << " in place " << p;
    std::lock_guard<SpinLock> lock_guard(cuda_allocators_lock_);
    try {
      GetAllocator(p, stream);
      VLOG(9) << "Other thread had build a allocator for stream " << stream
              << " in place " << p;
    } catch (platform::EnforceNotMet&) {
      InitAutoGrowthCUDAAllocator(p, stream);
      WrapStreamSafeCUDAAllocator(p, stream);
      WrapCUDARetryAllocator(p, stream, FLAGS_gpu_allocator_retry_time);
    } catch (...) {
      throw;
    }
  }

  void InitNaiveBestFitCUDAAllocator(platform::CUDAPlace p) {
    allocators_[p] = std::make_shared<NaiveBestFitAllocator>(p);
  }

  void InitAutoGrowthCUDAAllocator(platform::CUDAPlace p, gpuStream_t stream) {
#if defined(PADDLE_WITH_HIP)
    auto cuda_allocator = std::make_shared<CUDAAllocator>(p);
    cuda_allocators_[p][stream] = std::make_shared<AutoGrowthBestFitAllocator>(
        cuda_allocator, platform::GpuMinChunkSize(), 0, allow_free_idle_chunk_);
#endif

#if defined(PADDLE_WITH_CUDA)
#if CUDA_VERSION >= 10020
    CUdevice device;
    int val;
    try {
      PADDLE_ENFORCE_GPU_SUCCESS(
          paddle::platform::dynload::cuDeviceGet(&device, p.GetDeviceId()));

      PADDLE_ENFORCE_GPU_SUCCESS(
          paddle::platform::dynload::cuDeviceGetAttribute(
              &val, CU_DEVICE_ATTRIBUTE_VIRTUAL_ADDRESS_MANAGEMENT_SUPPORTED,
              device));
    } catch (...) {
      val = 0;
    }

    if (val > 0 && FLAGS_use_virtual_memory_auto_growth) {
      auto cuda_allocator = std::make_shared<CUDAVirtualMemAllocator>(p);
      cuda_allocators_[p][stream] =
          std::make_shared<VirtualMemoryAutoGrowthBestFitAllocator>(
              cuda_allocator, platform::GpuMinChunkSize(), p);
    } else {
      auto cuda_allocator = std::make_shared<CUDAAllocator>(p);
      cuda_allocators_[p][stream] =
          std::make_shared<AutoGrowthBestFitAllocator>(
              cuda_allocator, platform::GpuMinChunkSize(),
              allow_free_idle_chunk_);
    }
#else
    auto cuda_allocator = std::make_shared<CUDAAllocator>(p);
    auto alignment = platform::GpuMinChunkSize();
    bool need_addr_align = true;
    // NOTE: Since the CUDA runtime cannot survive a fork, calling any CUDA
    // API in a forked child may return CUDA error(3), i.e.,
    // cudaErrorInitializationError, even though the CUDAAllocator is only
    // initialized and not really used there.
    // The try-catch block handles the case where GetDeviceProperties() may
    // fail in a multi-process setting (for example, in a dataloader with
    // num_worker > 0).
    try {
      const auto& prop = platform::GetDeviceProperties(p.GetDeviceId());
      need_addr_align = prop.textureAlignment < alignment;
      VLOG(4) << "GetDeviceProperties ok, textureAlignment: "
              << prop.textureAlignment
              << ", set need_addr_align=" << need_addr_align;
    } catch (...) {
      need_addr_align = true;
      VLOG(4) << "GetDeviceProperties failed, set need_addr_align=true";
    }
    // The address returned is aligned already,
    // ref:
    // https://stackoverflow.com/questions/14082964/cuda-alignment-256bytes-seriously/14083295#14083295
    std::shared_ptr<Allocator> underlying_allocator{nullptr};
    if (need_addr_align) {
      VLOG(10) << "use AlignedAllocator with alignment: " << alignment;
      underlying_allocator =
          std::make_shared<AlignedAllocator>(cuda_allocator, alignment);
    } else {
      VLOG(10) << "not use AlignedAllocator with alignment: " << alignment;
      underlying_allocator = cuda_allocator;
    }

    cuda_allocators_[p][stream] = std::make_shared<AutoGrowthBestFitAllocator>(
        underlying_allocator, alignment, 0, allow_free_idle_chunk_);
#endif
#endif
  }

  // NOTE(Ruibiao): Old single-stream version, will be removed later
  void InitAutoGrowthCUDAAllocator(platform::CUDAPlace p,
                                   bool allow_free_idle_chunk) {
#if defined(PADDLE_WITH_HIP)
    auto cuda_allocator = std::make_shared<CUDAAllocator>(p);
    allocators_[p] = std::make_shared<AutoGrowthBestFitAllocator>(
        cuda_allocator, platform::GpuMinChunkSize(), allow_free_idle_chunk);
#endif

#if defined(PADDLE_WITH_CUDA)
#if CUDA_VERSION >= 10020
    CUdevice device;
    int val;
    try {
      PADDLE_ENFORCE_GPU_SUCCESS(
          paddle::platform::dynload::cuDeviceGet(&device, p.GetDeviceId()));

      PADDLE_ENFORCE_GPU_SUCCESS(
          paddle::platform::dynload::cuDeviceGetAttribute(
              &val, CU_DEVICE_ATTRIBUTE_VIRTUAL_ADDRESS_MANAGEMENT_SUPPORTED,
              device));
    } catch (...) {
      val = 0;
    }

    if (val > 0 && FLAGS_use_virtual_memory_auto_growth) {
      auto cuda_allocator = std::make_shared<CUDAVirtualMemAllocator>(p);
      allocators_[p] =
          std::make_shared<VirtualMemoryAutoGrowthBestFitAllocator>(
              cuda_allocator, platform::GpuMinChunkSize(), p);
    } else {
      auto cuda_allocator = std::make_shared<CUDAAllocator>(p);
      allocators_[p] = std::make_shared<AutoGrowthBestFitAllocator>(
          cuda_allocator, platform::GpuMinChunkSize(), allow_free_idle_chunk);
    }

#else
    auto cuda_allocator = std::make_shared<CUDAAllocator>(p);
    auto alignment = platform::GpuMinChunkSize();
    bool need_addr_align = true;
    // NOTE: Since the CUDA runtime cannot survive a fork, calling any CUDA
    // API in a forked child may return CUDA error(3), i.e.,
    // cudaErrorInitializationError, even though the CUDAAllocator is only
    // initialized and not really used there.
    // The try-catch block handles the case where GetDeviceProperties() may
    // fail in a multi-process setting (for example, in a dataloader with
    // num_worker > 0).
    try {
      const auto& prop = platform::GetDeviceProperties(p.GetDeviceId());
      need_addr_align = prop.textureAlignment < alignment;
      VLOG(4) << "GetDeviceProperties ok, textureAlignment: "
              << prop.textureAlignment
              << ", set need_addr_align=" << need_addr_align;
    } catch (...) {
      need_addr_align = true;
      VLOG(4) << "GetDeviceProperties failed, set need_addr_align=true";
    }
    // The address returned is aligned already,
    // ref:
    // https://stackoverflow.com/questions/14082964/cuda-alignment-256bytes-seriously/14083295#14083295
    std::shared_ptr<Allocator> underlying_allocator{nullptr};
    if (need_addr_align) {
      VLOG(10) << "use AlignedAllocator with alignment: " << alignment;
      underlying_allocator =
          std::make_shared<AlignedAllocator>(cuda_allocator, alignment);
    } else {
      VLOG(10) << "not use AlignedAllocator with alignment: " << alignment;
      underlying_allocator = cuda_allocator;
    }
    allocators_[p] = std::make_shared<AutoGrowthBestFitAllocator>(
        underlying_allocator, alignment, 0, allow_free_idle_chunk);
#endif
#endif
  }

  void InitThreadLocalCUDAAllocator(platform::CUDAPlace p) {
    allocators_[p] = std::make_shared<ThreadLocalCUDAAllocator>(p);
  }

  void WrapStreamSafeCUDAAllocator(platform::CUDAPlace p, gpuStream_t stream) {
    const std::shared_ptr<Allocator>& underlying_allocator =
        GetAllocator(p, stream);
    cuda_allocators_[p][stream] = std::make_shared<StreamSafeCUDAAllocator>(
        underlying_allocator, p, stream);
  }

  void WrapCUDARetryAllocator(platform::CUDAPlace p, gpuStream_t stream,
                              size_t retry_time) {
    PADDLE_ENFORCE_GT(
        retry_time, 0,
        platform::errors::InvalidArgument(
            "Retry time should be larger than 0, but got %d", retry_time));
    std::shared_ptr<Allocator>& allocator = cuda_allocators_[p][stream];
    allocator = std::make_shared<RetryAllocator>(allocator, retry_time);
  }

  static void CheckCUDAAllocThreadSafe(const CUDAAllocatorMap& allocators) {
    for (auto& place_pair : allocators) {
      for (auto& stream_pair : place_pair.second) {
        PADDLE_ENFORCE_EQ(stream_pair.second->IsAllocThreadSafe(), true,
                          platform::errors::InvalidArgument(
                              "Public allocators must be thread safe"));
      }
    }
  }
#endif

#ifdef PADDLE_WITH_XPU
  void InitNaiveBestFitXPUAllocator(platform::XPUPlace p) {
    allocators_[p] = std::make_shared<NaiveBestFitAllocator>(p);
  }
#endif

#ifdef PADDLE_WITH_IPU
  void InitNaiveBestFitIPUAllocator(platform::IPUPlace p) {
    allocators_[p] = std::make_shared<NaiveBestFitAllocator>(p);
  }
#endif

#ifdef PADDLE_WITH_ASCEND_CL
  void InitNaiveBestFitNPUAllocator(platform::NPUPlace p) {
    allocators_[p] = std::make_shared<NaiveBestFitAllocator>(p);
  }

  void InitNaiveBestFitNPUPinnedAllocator() {
    allocators_[platform::NPUPinnedPlace()] =
        std::make_shared<paddle::memory::allocation::NPUPinnedAllocator>();
  }
#endif

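  // System allocators bypass the configured strategy and hand out raw
  // host/device memory; they are only used when FLAGS_use_system_allocator
  // is set (unittests).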
  void InitSystemAllocators() {
    if (!system_allocators_.empty()) return;
    system_allocators_[platform::CPUPlace()] = std::make_shared<CPUAllocator>();
#ifdef PADDLE_WITH_XPU
    int device_count = platform::GetXPUDeviceCount();
    for (int i = 0; i < device_count; ++i) {
      platform::XPUPlace p(i);
      system_allocators_[p] = std::make_shared<NaiveBestFitAllocator>(p);
    }
#endif
#ifdef PADDLE_WITH_IPU
    int device_count = platform::GetIPUDeviceCount();
    for (int i = 0; i < device_count; ++i) {
      platform::IPUPlace p(i);
      system_allocators_[p] = std::make_shared<NaiveBestFitAllocator>(p);
    }
#endif
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
    system_allocators_[platform::CUDAPinnedPlace()] =
        std::make_shared<CPUPinnedAllocator>();
    int device_count = platform::GetGPUDeviceCount();
    for (int i = 0; i < device_count; ++i) {
      platform::CUDAPlace p(i);
      system_allocators_[p] = std::make_shared<CUDAAllocator>(p);
    }
#endif
  }

  void InitZeroSizeAllocators() {
    if (!zero_size_allocators_.empty()) return;
    std::vector<platform::Place> places;
    places.emplace_back(platform::CPUPlace());
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
    int device_count = platform::GetGPUDeviceCount();
    for (int dev_id = 0; dev_id < device_count; ++dev_id) {
      places.emplace_back(platform::CUDAPlace(dev_id));
    }
    places.emplace_back(platform::CUDAPinnedPlace());
#endif
#ifdef PADDLE_WITH_XPU
    int device_count = platform::GetXPUDeviceCount();
    for (int dev_id = 0; dev_id < device_count; ++dev_id) {
      places.emplace_back(platform::XPUPlace(dev_id));
    }
#endif
#ifdef PADDLE_WITH_ASCEND_CL
    int device_count = platform::GetNPUDeviceCount();
    for (int dev_id = 0; dev_id < device_count; ++dev_id) {
      places.emplace_back(platform::NPUPlace(dev_id));
    }
#endif
#ifdef PADDLE_WITH_IPU
    int device_count = platform::GetIPUDeviceCount();
    for (int dev_id = 0; dev_id < device_count; ++dev_id) {
      places.emplace_back(platform::IPUPlace(dev_id));
    }
#endif

    for (auto& p : places) {
      zero_size_allocators_[p] = std::make_shared<ZeroSizeAllocator>(p);
    }
  }

  static void CheckAllocThreadSafe(const AllocatorMap& allocators) {
    for (auto& pair : allocators) {
      PADDLE_ENFORCE_EQ(pair.second->IsAllocThreadSafe(), true,
                        platform::errors::InvalidArgument(
                            "Public allocators must be thread safe"));
    }
  }

  void CheckAllocThreadSafe() const {
    CheckAllocThreadSafe(allocators_);
    CheckAllocThreadSafe(zero_size_allocators_);
    CheckAllocThreadSafe(system_allocators_);
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
    if (FLAGS_use_stream_safe_cuda_allocator) {
      CheckCUDAAllocThreadSafe(cuda_allocators_);
    }
#endif
  }

  // NOTE(Ruibiao): Old single-stream version, will be removed later
  void WrapCUDARetryAllocator(size_t retry_time) {
    PADDLE_ENFORCE_GT(
        retry_time, 0,
        platform::errors::InvalidArgument(
            "Retry time should be larger than 0, but got %d", retry_time));
    for (auto& pair : allocators_) {
      if (platform::is_gpu_place(pair.first)) {
        pair.second = std::make_shared<RetryAllocator>(pair.second, retry_time);
      }
    }
  }

#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
  // a standalone CUDA allocator to support multi-stream GC in new executor
  CUDAAllocatorMap cuda_allocators_;
  std::vector<gpuStream_t> default_streams_;
  SpinLock cuda_allocators_lock_;
#ifdef PADDLE_WITH_CUDA
  std::unordered_map<CUDAGraphID, std::unique_ptr<AllocatorFacadePrivate>>
      cuda_graph_allocator_map_;
#endif
#endif
  AllocatorStrategy strategy_;
  AllocatorMap allocators_;
  static AllocatorMap zero_size_allocators_;
  static AllocatorMap system_allocators_;
  bool allow_free_idle_chunk_;
};
AllocatorFacadePrivate::AllocatorMap
    AllocatorFacadePrivate::zero_size_allocators_;
AllocatorFacadePrivate::AllocatorMap AllocatorFacadePrivate::system_allocators_;

// Pimpl. Make interface clean.
AllocatorFacade::AllocatorFacade() : m_(new AllocatorFacadePrivate()) {}
// Deleting m_ may cause a core dump when the Python destructor conflicts
// with the C++ one, so the destructor intentionally leaves it alone.
AllocatorFacade::~AllocatorFacade() {}

AllocatorFacade& AllocatorFacade::Instance() {
  static AllocatorFacade instance;
  return instance;
}

const std::shared_ptr<Allocator>& AllocatorFacade::GetAllocator(
    const platform::Place& place) {
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
  if (FLAGS_use_stream_safe_cuda_allocator && platform::is_gpu_place(place) &&
      FLAGS_use_system_allocator == false) {
#ifdef PADDLE_WITH_CUDA
    if (UNLIKELY(platform::CUDAGraph::IsCapturing())) {
      return m_->GetAllocator(place,
                              /* A non-zero num to choose allocator_ */ 1);
    }
#endif

    platform::CUDAPlace cuda_place =
        BOOST_GET_CONST(platform::CUDAPlace, place);
    return m_->GetAllocator(cuda_place, m_->GetDefaultStream(cuda_place));
  }
#endif

  return m_->GetAllocator(place, /* A non-zero num to choose allocator_ */ 1);
}

std::shared_ptr<Allocation> AllocatorFacade::AllocShared(
    const platform::Place& place, size_t size) {
  return std::shared_ptr<Allocation>(Alloc(place, size));
}

AllocationPtr AllocatorFacade::Alloc(const platform::Place& place,
                                     size_t size) {
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
  if (FLAGS_use_stream_safe_cuda_allocator && platform::is_gpu_place(place) &&
      size > 0 && FLAGS_use_system_allocator == false) {
#ifdef PADDLE_WITH_CUDA
    if (UNLIKELY(platform::CUDAGraph::IsCapturing())) {
      return m_->GetAllocator(place, size)->Allocate(size);
    }
#endif

    platform::CUDAPlace cuda_place =
        BOOST_GET_CONST(platform::CUDAPlace, place);
    return Alloc(cuda_place, size, m_->GetDefaultStream(cuda_place));
  }
#endif

  return m_->GetAllocator(place, size)->Allocate(size);
}

uint64_t AllocatorFacade::Release(const platform::Place& place) {
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
  if (FLAGS_use_stream_safe_cuda_allocator && platform::is_gpu_place(place) &&
      FLAGS_use_system_allocator == false) {
#ifdef PADDLE_WITH_CUDA
    if (UNLIKELY(platform::CUDAGraph::IsCapturing())) {
      return m_
          ->GetAllocator(place, /* A non-zero num to choose allocator_ */ 1)
          ->Release(place);
    }
#endif

    platform::CUDAPlace cuda_place =
        BOOST_GET_CONST(platform::CUDAPlace, place);
    return Release(cuda_place, m_->GetDefaultStream(cuda_place));
  }
#endif
  return m_->GetAllocator(place, /* A non-zero num to choose allocator_ */ 1)
      ->Release(place);
}

#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
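// Multi-stream interface: the overloads below require
// FLAGS_use_stream_safe_cuda_allocator=true and must not be used while a
// CUDA Graph is being captured.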
std::shared_ptr<Allocation> AllocatorFacade::AllocShared(
    const platform::CUDAPlace& place, size_t size, const gpuStream_t& stream) {
  PADDLE_ENFORCE_EQ(
      FLAGS_use_stream_safe_cuda_allocator, true,
      platform::errors::Unimplemented(
          "StreamSafeCUDAAllocator is disabled, you should not call this "
          "multi-stream 'AllocShared' function. To enable it, you can enter "
          "'export FLAGS_use_stream_safe_cuda_allocator=true' in the "
          "terminal."));

#ifdef PADDLE_WITH_CUDA
  if (UNLIKELY(platform::CUDAGraph::IsCapturing())) {
    PADDLE_THROW(platform::errors::Unavailable(
        "Not allow to use StreamSafeCUDAAllocator with CUDAGraphAllocator"));
  }
#endif

  return std::shared_ptr<Allocation>(Alloc(place, size, stream));
}

AllocationPtr AllocatorFacade::Alloc(const platform::CUDAPlace& place,
                                     size_t size, const gpuStream_t& stream) {
  PADDLE_ENFORCE_EQ(
      FLAGS_use_stream_safe_cuda_allocator, true,
      platform::errors::Unimplemented(
          "StreamSafeCUDAAllocator is disabled, you should not call this "
          "multi-stream 'Alloc' function. To enable it, you can enter "
          "'export FLAGS_use_stream_safe_cuda_allocator=true' in the "
          "terminal."));

#ifdef PADDLE_WITH_CUDA
  if (UNLIKELY(platform::CUDAGraph::IsCapturing())) {
    PADDLE_THROW(platform::errors::Unavailable(
        "Not allow to use StreamSafeCUDAAllocator with CUDAGraphAllocator"));
  }
#endif

  if (LIKELY(size > 0 && FLAGS_use_system_allocator == false)) {
    return m_->GetAllocator(place, stream, /* create_if_not_found = */ true)
        ->Allocate(size);
  } else {
    return m_->GetAllocator(place, size)->Allocate(size);
  }
}

uint64_t AllocatorFacade::Release(const platform::CUDAPlace& place,
                                  const gpuStream_t& stream) {
  PADDLE_ENFORCE_EQ(
      FLAGS_use_stream_safe_cuda_allocator, true,
      platform::errors::Unimplemented(
          "StreamSafeCUDAAllocator is disabled, you should not call this "
          "multi-stream 'Release' function. To enable it, you can enter "
          "'export FLAGS_use_stream_safe_cuda_allocator=true' in the "
          "terminal."));

#ifdef PADDLE_WITH_CUDA
  if (UNLIKELY(platform::CUDAGraph::IsCapturing())) {
    PADDLE_THROW(platform::errors::Unavailable(
        "Not allow to use StreamSafeCUDAAllocator with CUDAGraphAllocator"));
  }
#endif

  return m_->GetAllocator(place, stream)->Release(place);
}

void AllocatorFacade::RecordStream(std::shared_ptr<Allocation> allocation,
                                   const gpuStream_t& stream) {
  PADDLE_ENFORCE_EQ(
      FLAGS_use_stream_safe_cuda_allocator, true,
      platform::errors::Unimplemented(
          "StreamSafeCUDAAllocator is disabled, you should not call this "
          "'RecordStream' function. To enable it, you can enter "
          "'export FLAGS_use_stream_safe_cuda_allocator=true' in the "
          "terminal."));

#ifdef PADDLE_WITH_CUDA
  if (UNLIKELY(platform::CUDAGraph::IsCapturing())) {
    PADDLE_THROW(platform::errors::Unavailable(
        "Not allow to use StreamSafeCUDAAllocator with CUDAGraphAllocator"));
  }
#endif

  m_->RecordStream(allocation, stream);
}

const gpuStream_t& AllocatorFacade::GetStream(
    const std::shared_ptr<Allocation>& allocation) const {
  PADDLE_ENFORCE_EQ(
      FLAGS_use_stream_safe_cuda_allocator, true,
      platform::errors::Unimplemented(
          "StreamSafeCUDAAllocator is disabled, you should not call this "
          "'GetStream' function. To enable it, you can enter"
          "'export FLAGS_use_stream_safe_cuda_allocator=true' in the "
          "terminal."));

#ifdef PADDLE_WITH_CUDA
  if (UNLIKELY(platform::CUDAGraph::IsCapturing())) {
    PADDLE_THROW(platform::errors::Unavailable(
        "Not allow to use StreamSafeCUDAAllocator with CUDAGraphAllocator"));
  }
#endif

  return m_->GetStream(allocation);
}

#ifdef PADDLE_WITH_CUDA
void AllocatorFacade::PrepareMemoryPoolForCUDAGraph(CUDAGraphID id) {
  return m_->PrepareMemoryPoolForCUDAGraph(id);
}

void AllocatorFacade::RemoveMemoryPoolOfCUDAGraph(CUDAGraphID id) {
  return m_->RemoveMemoryPoolOfCUDAGraph(id);
}
#endif
#endif
}  // namespace allocation
}  // namespace memory
}  // namespace paddle