// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/fluid/memory/allocation/allocator_facade.h"

#include "gflags/gflags.h"
#include "paddle/fluid/memory/allocation/aligned_allocator.h"
#include "paddle/fluid/memory/allocation/allocator.h"
#include "paddle/fluid/memory/allocation/allocator_strategy.h"
#include "paddle/fluid/memory/allocation/auto_growth_best_fit_allocator.h"
#include "paddle/fluid/memory/allocation/cpu_allocator.h"
#include "paddle/fluid/memory/allocation/naive_best_fit_allocator.h"
#include "paddle/fluid/memory/allocation/retry_allocator.h"
#include "paddle/fluid/platform/enforce.h"
#include "paddle/fluid/platform/place.h"

#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
#include <shared_mutex>
#include "paddle/fluid/memory/allocation/cuda_allocator.h"
#include "paddle/fluid/memory/allocation/cuda_managed_allocator.h"
#include "paddle/fluid/memory/allocation/pinned_allocator.h"
#include "paddle/fluid/memory/allocation/stream_safe_cuda_allocator.h"
#include "paddle/fluid/memory/allocation/thread_local_allocator.h"
#include "paddle/fluid/platform/device/gpu/gpu_info.h"
#include "paddle/fluid/platform/device_context.h"

#ifdef PADDLE_WITH_CUDA
#include "paddle/fluid/platform/device/gpu/cuda/cuda_graph.h"
#endif

#if CUDA_VERSION >= 10020
#include "paddle/fluid/memory/allocation/cuda_virtual_mem_allocator.h"
#include "paddle/fluid/memory/allocation/virtual_memory_auto_growth_best_fit_allocator.h"
#include "paddle/fluid/platform/dynload/cuda_driver.h"
#endif
#endif

#ifdef PADDLE_WITH_XPU
#include "paddle/fluid/platform/device/xpu/xpu_info.h"
#endif

#ifdef PADDLE_WITH_ASCEND_CL
#include "paddle/fluid/memory/allocation/npu_pinned_allocator.h"
#endif

#ifdef PADDLE_WITH_IPU
#include "paddle/fluid/platform/device/ipu/ipu_info.h"
#endif

#ifdef PADDLE_WITH_MLU
#include "paddle/fluid/platform/device/mlu/mlu_info.h"
#endif

#ifdef PADDLE_WITH_CUSTOM_DEVICE
#include "paddle/fluid/memory/allocation/custom_allocator.h"
#include "paddle/fluid/platform/device/device_wrapper.h"
#endif

PADDLE_DEFINE_EXPORTED_int64(
    gpu_allocator_retry_time, 10000,
    "The retry time (milliseconds) when allocator fails "
    "to allocate memory. No retry if this value is not greater than 0");

PADDLE_DEFINE_EXPORTED_bool(
    use_system_allocator, false,
    "Whether to use system allocator to allocate CPU and GPU memory. "
    "Only used for unittests.");

PADDLE_DEFINE_EXPORTED_bool(use_virtual_memory_auto_growth, false,
                            "Use VirtualMemoryAutoGrowthBestFitAllocator.");

// NOTE(Ruibiao): This flag exists only for compatibility with the old
// single-stream CUDA allocator. It will be removed after
// StreamSafeCudaAllocator has been fully tested.
PADDLE_DEFINE_EXPORTED_bool(use_stream_safe_cuda_allocator, false,
                            "Enable StreamSafeCUDAAllocator");

PADDLE_DEFINE_EXPORTED_bool(use_cuda_managed_memory, false,
                            "Whether to use CUDAManagedAllocator to allocate "
                            "managed memory, only available for auto_growth "
                            "strategy");

DECLARE_string(allocator_strategy);

namespace paddle {
namespace memory {
namespace allocation {

#ifdef PADDLE_WITH_CUDA
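// Wraps another allocator for use during CUDA graph capture: each
// PrivateAllocation holds a shared_ptr back to this allocator (via
// enable_shared_from_this), so the wrapping pool cannot be destroyed while
// graph-owned allocations are still live.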
class CUDAGraphAllocator
    : public Allocator,
      public std::enable_shared_from_this<CUDAGraphAllocator> {
 private:
  class PrivateAllocation : public Allocation {
   public:
    PrivateAllocation(CUDAGraphAllocator* allocator,
                      DecoratedAllocationPtr underlying_allocation)
        : Allocation(
              underlying_allocation->ptr(), underlying_allocation->base_ptr(),
              underlying_allocation->size(), underlying_allocation->place()),
          allocator_(allocator->shared_from_this()),
          underlying_allocation_(std::move(underlying_allocation)) {}

   private:
    std::shared_ptr<Allocator> allocator_;
    DecoratedAllocationPtr underlying_allocation_;
  };

  explicit CUDAGraphAllocator(const std::shared_ptr<Allocator>& allocator)
      : underlying_allocator_(allocator) {}

 public:
  static std::shared_ptr<Allocator> Create(
      const std::shared_ptr<Allocator>& allocator) {
    return std::shared_ptr<Allocator>(new CUDAGraphAllocator(allocator));
  }

 protected:
  phi::Allocation* AllocateImpl(size_t size) {
    VLOG(10) << "Allocate " << size << " for CUDA Graph";
    return new PrivateAllocation(this,
                                 static_unique_ptr_cast<Allocation>(
                                     underlying_allocator_->Allocate(size)));
  }

  void FreeImpl(phi::Allocation* allocation) {
    VLOG(10) << "delete for CUDA Graph";
    delete allocation;
  }

 private:
  std::shared_ptr<Allocator> underlying_allocator_;
};
#endif

class AllocatorFacadePrivate {
 public:
  using AllocatorMap = std::map<platform::Place, std::shared_ptr<Allocator>>;

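  // With StreamSafeCUDAAllocator, each (CUDAPlace, gpuStream_t) pair owns an
  // independent allocator, hence the two-level map below.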
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
  using CUDAAllocatorMap =
      std::map<platform::CUDAPlace,
               std::map<gpuStream_t, std::shared_ptr<Allocator>>>;
#endif

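  // allow_free_idle_chunk is false only when this private facade backs a
  // CUDA-Graph memory pool (see PrepareMemoryPoolForCUDAGraph below), where
  // idle chunks presumably must stay resident for graph replay.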
  explicit AllocatorFacadePrivate(bool allow_free_idle_chunk = true) {
    strategy_ = GetAllocatorStrategy();
    switch (strategy_) {
      case AllocatorStrategy::kNaiveBestFit: {
        InitNaiveBestFitCPUAllocator();
#ifdef PADDLE_WITH_IPU
        for (int dev_id = 0; dev_id < platform::GetIPUDeviceCount(); ++dev_id) {
          InitNaiveBestFitIPUAllocator(platform::IPUPlace(dev_id));
        }
#endif
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
        PADDLE_ENFORCE_EQ(
            FLAGS_use_stream_safe_cuda_allocator, false,
            paddle::platform::errors::Unimplemented(
                "StreamSafeCUDAAllocator is only implemented for the "
                "auto_growth strategy and does not support the "
                "naive_best_fit strategy"));

        for (int dev_id = 0; dev_id < platform::GetGPUDeviceCount(); ++dev_id) {
          InitNaiveBestFitCUDAAllocator(platform::CUDAPlace(dev_id));
        }
        InitNaiveBestFitCUDAPinnedAllocator();
#endif
#ifdef PADDLE_WITH_XPU
        for (int dev_id = 0; dev_id < platform::GetXPUDeviceCount(); ++dev_id) {
          InitNaiveBestFitXPUAllocator(platform::XPUPlace(dev_id));
        }
#endif
#ifdef PADDLE_WITH_ASCEND_CL
        for (int dev_id = 0; dev_id < platform::GetNPUDeviceCount(); ++dev_id) {
          InitNaiveBestFitNPUAllocator(platform::NPUPlace(dev_id));
        }
        InitNaiveBestFitNPUPinnedAllocator();
#endif
#ifdef PADDLE_WITH_MLU
        for (int dev_id = 0; dev_id < platform::GetMLUDeviceCount(); ++dev_id) {
          InitNaiveBestFitMLUAllocator(platform::MLUPlace(dev_id));
        }
#endif
#ifdef PADDLE_WITH_CUSTOM_DEVICE
        auto device_types = phi::DeviceManager::GetAllCustomDeviceTypes();
        for (const auto& dev_type : device_types) {
          for (size_t dev_id = 0;
               dev_id < phi::DeviceManager::GetDeviceCount(dev_type);
               ++dev_id) {
            InitNaiveBestFitCustomDeviceAllocator(
                platform::CustomPlace(dev_type, dev_id));
          }
        }
#endif
        break;
      }

      case AllocatorStrategy::kAutoGrowth: {
        InitNaiveBestFitCPUAllocator();
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
        allow_free_idle_chunk_ = allow_free_idle_chunk;
        if (!FLAGS_use_stream_safe_cuda_allocator) {
          for (int dev_id = 0; dev_id < platform::GetGPUDeviceCount();
               ++dev_id) {
            InitAutoGrowthCUDAAllocator(platform::CUDAPlace(dev_id),
                                        allow_free_idle_chunk_);
          }
        }
        InitNaiveBestFitCUDAPinnedAllocator();
#endif
#ifdef PADDLE_WITH_XPU
        for (int dev_id = 0; dev_id < platform::GetXPUDeviceCount(); ++dev_id) {
          InitNaiveBestFitXPUAllocator(platform::XPUPlace(dev_id));
        }
#endif
#ifdef PADDLE_WITH_IPU
        for (int dev_id = 0; dev_id < platform::GetIPUDeviceCount(); ++dev_id) {
          InitNaiveBestFitIPUAllocator(platform::IPUPlace(dev_id));
        }
#endif
#ifdef PADDLE_WITH_MLU
        for (int dev_id = 0; dev_id < platform::GetMLUDeviceCount(); ++dev_id) {
          InitNaiveBestFitMLUAllocator(platform::MLUPlace(dev_id));
        }
#endif
#ifdef PADDLE_WITH_CUSTOM_DEVICE
        auto device_types = phi::DeviceManager::GetAllCustomDeviceTypes();
        for (const auto& dev_type : device_types) {
          for (size_t dev_id = 0;
               dev_id < phi::DeviceManager::GetDeviceCount(dev_type);
               ++dev_id) {
            InitAutoGrowthCustomDeviceAllocator(
                platform::CustomPlace(dev_type, dev_id), allow_free_idle_chunk);
          }
        }
#endif
        break;
      }

      case AllocatorStrategy::kThreadLocal: {
        InitNaiveBestFitCPUAllocator();
#ifdef PADDLE_WITH_XPU
        for (int dev_id = 0; dev_id < platform::GetXPUDeviceCount(); ++dev_id) {
          InitNaiveBestFitXPUAllocator(platform::XPUPlace(dev_id));
        }
#endif
#ifdef PADDLE_WITH_IPU
        for (int dev_id = 0; dev_id < platform::GetIPUDeviceCount(); ++dev_id) {
          InitNaiveBestFitIPUAllocator(platform::IPUPlace(dev_id));
        }
#endif
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
        PADDLE_ENFORCE_EQ(
            FLAGS_use_stream_safe_cuda_allocator, false,
            paddle::platform::errors::Unimplemented(
                "StreamSafeCUDAAllocator is only implemented for the "
                "auto_growth strategy and does not support the "
                "thread_local strategy"));

        for (int dev_id = 0; dev_id < platform::GetGPUDeviceCount(); ++dev_id) {
          InitThreadLocalCUDAAllocator(platform::CUDAPlace(dev_id));
        }
        InitNaiveBestFitCUDAPinnedAllocator();
#endif
#ifdef PADDLE_WITH_MLU
        for (int dev_id = 0; dev_id < platform::GetMLUDeviceCount(); ++dev_id) {
          InitNaiveBestFitMLUAllocator(platform::MLUPlace(dev_id));
        }
#endif
        break;
      }

      default: {
        PADDLE_THROW(platform::errors::InvalidArgument(
            "Unsupported allocator strategy: %d", static_cast<int>(strategy_)));
      }
    }
    InitZeroSizeAllocators();
    InitSystemAllocators();

    if (FLAGS_gpu_allocator_retry_time > 0) {
      WrapCUDARetryAllocator(FLAGS_gpu_allocator_retry_time);
    }

    CheckAllocThreadSafe();

#ifdef PADDLE_WITH_CUDA
    if (UNLIKELY(platform::CUDAGraph::IsThisThreadCapturing())) {
      WrapCUDAGraphAllocator();
    }
#endif
  }

  inline const std::shared_ptr<Allocator>& GetAllocator(
      const platform::Place& place, size_t size) {
    VLOG(6) << "GetAllocator"
            << " " << place << " " << size;
    const auto& allocators =
        (size > 0 ? (UNLIKELY(FLAGS_use_system_allocator) ? system_allocators_
                                                          : GetAllocatorMap())
                  : zero_size_allocators_);
    auto iter = allocators.find(place);
    PADDLE_ENFORCE_NE(iter, allocators.end(),
                      platform::errors::NotFound(
                          "No allocator found for the place, %s", place));
    return iter->second;
  }

  void* GetBasePtr(const std::shared_ptr<phi::Allocation>& allocation) {
    return static_cast<Allocation*>(allocation.get())->base_ptr();
  }

#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
  bool HasCUDAAllocator(const platform::CUDAPlace& place,
                        const gpuStream_t& stream) {
    auto it = cuda_allocators_.find(place);
    if (it == cuda_allocators_.end()) {
      return false;
    }
    const std::map<gpuStream_t, std::shared_ptr<Allocator>>& allocator_map =
        it->second;
    return allocator_map.find(stream) != allocator_map.end();
  }

  const std::shared_ptr<Allocator>& GetAllocator(
      const platform::CUDAPlace& place, const gpuStream_t& stream,
      bool create_if_not_found = false) {
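    // Fast path: take a shared (reader) lock and return the allocator if it
    // already exists for this (place, stream) pair; otherwise fall through to
    // the slow path below, which takes a unique (writer) lock and creates it.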
    {  // shared_lock_guard
      std::shared_lock<std::shared_timed_mutex> lock_guard(
          cuda_allocator_mutex_);
      if (LIKELY(HasCUDAAllocator(place, stream))) {
        return cuda_allocators_[place][stream];
      } else {
        PADDLE_ENFORCE_NE(create_if_not_found, false,
                          platform::errors::NotFound(
                              "No allocator found for stream %s in place %s "
                              "with create_if_not_found = false",
                              stream, place));
      }
    }

    {  // unique_lock_guard
      std::unique_lock<std::shared_timed_mutex> lock_guard(
          cuda_allocator_mutex_);
      InitStreamSafeCUDAAllocator(place, stream);
      return cuda_allocators_[place][stream];
    }
  }

  gpuStream_t GetDefaultStream(const platform::CUDAPlace& place) {
    platform::DeviceContextPool& pool = platform::DeviceContextPool::Instance();
    return static_cast<platform::CUDADeviceContext*>(pool.Get(place))->stream();
  }

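  // Record that `stream` has touched `allocation` so the stream-safe
  // allocator can defer the actual free until all recorded streams have
  // finished using it; zero-size allocations own no memory and are skipped.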
  void RecordStream(std::shared_ptr<phi::Allocation> allocation,
                    const gpuStream_t& stream) {
    if (allocation->size() == 0) {
      return;
    }

    StreamSafeCUDAAllocation* stream_safe_cuda_allocation =
        dynamic_cast<StreamSafeCUDAAllocation*>(allocation.get());
    PADDLE_ENFORCE_NOT_NULL(stream_safe_cuda_allocation,
                            platform::errors::InvalidArgument(
                                "Failed to dynamic cast %p from Allocation* to "
                                "StreamSafeCUDAAllocation*",
                                allocation.get()));
    stream_safe_cuda_allocation->RecordStream(stream);
  }

  const gpuStream_t& GetStream(
      const std::shared_ptr<phi::Allocation>& allocation) const {
    const StreamSafeCUDAAllocation* stream_safe_cuda_allocation =
        dynamic_cast<const StreamSafeCUDAAllocation*>(allocation.get());
    PADDLE_ENFORCE_NOT_NULL(stream_safe_cuda_allocation,
                            platform::errors::InvalidArgument(
                                "Failed to dynamic cast %p from Allocation* to "
                                "StreamSafeCUDAAllocation*",
                                allocation.get()));
    return stream_safe_cuda_allocation->GetOwningStream();
  }
#endif

 private:
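  // For zero-byte requests: hands out a placeholder Allocation holding a
  // null pointer and size 0 for the requested place, so empty tensors never
  // touch the real device allocators.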
  class ZeroSizeAllocator : public Allocator {
   public:
    explicit ZeroSizeAllocator(platform::Place place) : place_(place) {}
    bool IsAllocThreadSafe() const override { return true; }

   protected:
    phi::Allocation* AllocateImpl(size_t size) override {
      return new Allocation(nullptr, 0, place_);
    }
    void FreeImpl(phi::Allocation* allocation) override { delete allocation; }

   private:
    platform::Place place_;
  };

  const AllocatorMap& GetAllocatorMap() { return allocators_; }

  void InitNaiveBestFitCPUAllocator() {
    allocators_[platform::CPUPlace()] =
        std::make_shared<NaiveBestFitAllocator>(platform::CPUPlace());
  }

#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
  void InitNaiveBestFitCUDAPinnedAllocator() {
    allocators_[platform::CUDAPinnedPlace()] =
        std::make_shared<NaiveBestFitAllocator>(platform::CUDAPinnedPlace());
  }

  void InitNaiveBestFitCUDAAllocator(platform::CUDAPlace p) {
    allocators_[p] = std::make_shared<NaiveBestFitAllocator>(p);
  }

  // Create a new CUDAAllocator or CUDAManagedAllocator for the given device
  std::shared_ptr<Allocator> CreateCUDAAllocator(platform::CUDAPlace p) {
    if (FLAGS_use_cuda_managed_memory) {
      PADDLE_ENFORCE_EQ(
          strategy_, AllocatorStrategy::kAutoGrowth,
          platform::errors::InvalidArgument(
              "CUDA managed memory is only implemented for auto_growth "
              "strategy, not support %s strategy.\n"
              "Please use auto_growth strategy by command `export "
              "FLAGS_allocator_strategy=\"auto_growth\"`, or disable managed "
              "memory by command `export FLAGS_use_cuda_managed_memory=false`",
              FLAGS_allocator_strategy));

      if (!platform::IsGPUManagedMemorySupported(p.device)) {
        PADDLE_THROW(platform::errors::Unavailable(
            "Failed to create CUDAManagedAllocator on GPU %d.\n\n"
            "You have enabled CUDA managed memory, but the gpu device does not "
            "support allocating managed memory.\n"
            "If you don't actually need to use managed memory, please disable "
            "it with command `export FLAGS_use_cuda_managed_memory=false`.\n"
            "Or you must use the gpu device that supports managed memory.",
            p.device));
      }
      return std::make_shared<CUDAManagedAllocator>(p);
    }
    return std::make_shared<CUDAAllocator>(p);
  }

  void InitStreamSafeCUDAAllocator(platform::CUDAPlace p, gpuStream_t stream) {
    PADDLE_ENFORCE_EQ(
        strategy_, AllocatorStrategy::kAutoGrowth,
        platform::errors::Unimplemented(
            "Only support auto-growth strategy for StreamSafeCUDAAllocator, "
            "the allocator strategy %d is unsupported for multi-stream",
            static_cast<int>(strategy_)));
    if (LIKELY(!HasCUDAAllocator(p, stream))) {
      VLOG(8) << "Init CUDA allocator for stream " << stream << " in place "
              << p;
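      // Build the per-stream allocator chain: an auto-growth best-fit core,
      // wrapped by StreamSafeCUDAAllocator for cross-stream safety, and then
      // by RetryAllocator so an OOM triggers timed retries.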
      InitAutoGrowthCUDAAllocator(p, stream);
      WrapStreamSafeCUDAAllocator(p, stream);
      WrapCUDARetryAllocator(p, stream, FLAGS_gpu_allocator_retry_time);
    }
  }

  void InitAutoGrowthCUDAAllocator(platform::CUDAPlace p, gpuStream_t stream) {
#if defined(PADDLE_WITH_HIP)
    auto cuda_allocator = CreateCUDAAllocator(p);
    cuda_allocators_[p][stream] = std::make_shared<AutoGrowthBestFitAllocator>(
        cuda_allocator, platform::GpuMinChunkSize(), 0, allow_free_idle_chunk_);
#endif

#if defined(PADDLE_WITH_CUDA)
#if CUDA_VERSION >= 10020
    CUdevice device;
    int val;
    try {
      PADDLE_ENFORCE_GPU_SUCCESS(
          paddle::platform::dynload::cuDeviceGet(&device, p.GetDeviceId()));

      PADDLE_ENFORCE_GPU_SUCCESS(
          paddle::platform::dynload::cuDeviceGetAttribute(
              &val, CU_DEVICE_ATTRIBUTE_VIRTUAL_ADDRESS_MANAGEMENT_SUPPORTED,
              device));
    } catch (...) {
      val = 0;
    }

    if (val > 0 && FLAGS_use_virtual_memory_auto_growth) {
      auto cuda_allocator = std::make_shared<CUDAVirtualMemAllocator>(p);
      cuda_allocators_[p][stream] =
          std::make_shared<VirtualMemoryAutoGrowthBestFitAllocator>(
              cuda_allocator, platform::GpuMinChunkSize(), p);
    } else {
      auto cuda_allocator = CreateCUDAAllocator(p);
      cuda_allocators_[p][stream] =
          std::make_shared<AutoGrowthBestFitAllocator>(
              cuda_allocator, platform::GpuMinChunkSize(),
              allow_free_idle_chunk_);
    }
#else
    auto cuda_allocator = CreateCUDAAllocator(p);
    auto alignment = platform::GpuMinChunkSize();
    bool need_addr_align = true;
    // NOTE: since the CUDA runtime cannot survive a fork, calling any CUDA
    // API in a forked process may return cuda error(3), i.e.,
    // cudaErrorInitializationError, even though the CUDAAllocator is only
    // initialized but never really used.
    // The try-catch block handles the case where GetDeviceProperties() fails
    // in a multi-process setting (for example, a dataloader with
    // num_workers > 0).
    try {
      const auto& prop = platform::GetDeviceProperties(p.GetDeviceId());
      need_addr_align = prop.textureAlignment < alignment;
      VLOG(4) << "GetDeviceProperties ok, textureAlignment: "
              << prop.textureAlignment
              << ", set need_addr_align=" << need_addr_align;
    } catch (...) {
      need_addr_align = true;
      VLOG(4) << "GetDeviceProperties failed, set need_addr_align=true";
    }
    // The address returned is aligned already,
    // ref:
    // https://stackoverflow.com/questions/14082964/cuda-alignment-256bytes-seriously/14083295#14083295
    std::shared_ptr<Allocator> underlying_allocator{nullptr};
    if (need_addr_align) {
      VLOG(10) << "use AlignedAllocator with alignment: " << alignment;
      // Wrap the real CUDA allocator; wrapping the still-null
      // underlying_allocator here would be a bug.
      underlying_allocator =
          std::make_shared<AlignedAllocator>(cuda_allocator, alignment);
    } else {
      VLOG(10) << "not use AlignedAllocator with alignment: " << alignment;
      underlying_allocator = cuda_allocator;
    }

    cuda_allocators_[p][stream] = std::make_shared<AutoGrowthBestFitAllocator>(
        underlying_allocator, alignment, 0, allow_free_idle_chunk_);
#endif
#endif
  }

  // NOTE(Ruibiao): Old single-stream version, will be removed later
  void InitAutoGrowthCUDAAllocator(platform::CUDAPlace p,
                                   bool allow_free_idle_chunk) {
#if defined(PADDLE_WITH_HIP)
    auto cuda_allocator = CreateCUDAAllocator(p);
    allocators_[p] = std::make_shared<AutoGrowthBestFitAllocator>(
        cuda_allocator, platform::GpuMinChunkSize(), allow_free_idle_chunk);
#endif

#if defined(PADDLE_WITH_CUDA)
#if CUDA_VERSION >= 10020
    CUdevice device;
    int val;
    try {
      PADDLE_ENFORCE_GPU_SUCCESS(
          paddle::platform::dynload::cuDeviceGet(&device, p.GetDeviceId()));

      PADDLE_ENFORCE_GPU_SUCCESS(
          paddle::platform::dynload::cuDeviceGetAttribute(
              &val, CU_DEVICE_ATTRIBUTE_VIRTUAL_ADDRESS_MANAGEMENT_SUPPORTED,
              device));
    } catch (...) {
      val = 0;
    }

    if (val > 0 && FLAGS_use_virtual_memory_auto_growth) {
      auto cuda_allocator = std::make_shared<CUDAVirtualMemAllocator>(p);
      allocators_[p] =
          std::make_shared<VirtualMemoryAutoGrowthBestFitAllocator>(
              cuda_allocator, platform::GpuMinChunkSize(), p);
    } else {
      auto cuda_allocator = CreateCUDAAllocator(p);
      allocators_[p] = std::make_shared<AutoGrowthBestFitAllocator>(
          cuda_allocator, platform::GpuMinChunkSize(), allow_free_idle_chunk);
    }

#else
    auto cuda_allocator = CreateCUDAAllocator(p);
    auto alignment = platform::GpuMinChunkSize();
    bool need_addr_align = true;
    // NOTE: since the CUDA runtime cannot survive a fork, calling any CUDA
    // API in a forked process may return cuda error(3), i.e.,
    // cudaErrorInitializationError, even though the CUDAAllocator is only
    // initialized but never really used.
    // The try-catch block handles the case where GetDeviceProperties() fails
    // in a multi-process setting (for example, a dataloader with
    // num_workers > 0).
    try {
      const auto& prop = platform::GetDeviceProperties(p.GetDeviceId());
      need_addr_align = prop.textureAlignment < alignment;
      VLOG(4) << "GetDeviceProperties ok, textureAlignment: "
              << prop.textureAlignment
              << ", set need_addr_align=" << need_addr_align;
    } catch (...) {
      need_addr_align = true;
      VLOG(4) << "GetDeviceProperties failed, set need_addr_align=true";
    }
    // The address returned is aligned already,
    // ref:
    // https://stackoverflow.com/questions/14082964/cuda-alignment-256bytes-seriously/14083295#14083295
    std::shared_ptr<Allocator> underlying_allocator{nullptr};
    if (need_addr_align) {
      VLOG(10) << "use AlignedAllocator with alignment: " << alignment;
      // Wrap the real CUDA allocator; wrapping the still-null
      // underlying_allocator here would be a bug.
      underlying_allocator =
          std::make_shared<AlignedAllocator>(cuda_allocator, alignment);
    } else {
      VLOG(10) << "not use AlignedAllocator with alignment: " << alignment;
      underlying_allocator = cuda_allocator;
    }
    allocators_[p] = std::make_shared<AutoGrowthBestFitAllocator>(
        underlying_allocator, alignment, 0, allow_free_idle_chunk);
#endif
#endif
  }

  void InitThreadLocalCUDAAllocator(platform::CUDAPlace p) {
    allocators_[p] = std::make_shared<ThreadLocalCUDAAllocator>(p);
  }

  void WrapStreamSafeCUDAAllocator(platform::CUDAPlace p, gpuStream_t stream) {
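    // NOTE: allow_free_idle_chunk_ is false only for the dedicated CUDA-Graph
    // memory pool (see PrepareMemoryPoolForCUDAGraph), so its negation doubles
    // as the in_cuda_graph_capturing flag below.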
    std::shared_ptr<Allocator>& allocator = cuda_allocators_[p][stream];
    allocator = std::make_shared<StreamSafeCUDAAllocator>(
        allocator, p, stream,
        /* in_cuda_graph_capturing = */ !allow_free_idle_chunk_);
  }

  void WrapCUDARetryAllocator(platform::CUDAPlace p, gpuStream_t stream,
                              size_t retry_time) {
    PADDLE_ENFORCE_GT(
        retry_time, 0,
        platform::errors::InvalidArgument(
            "Retry time should be larger than 0, but got %d", retry_time));
    std::shared_ptr<Allocator>& allocator = cuda_allocators_[p][stream];
    allocator = std::make_shared<RetryAllocator>(allocator, retry_time);
  }

#ifdef PADDLE_WITH_CUDA
  void WrapCUDAGraphAllocator() {
    for (auto& item : allocators_) {
      auto& allocator = item.second;
      allocator = CUDAGraphAllocator::Create(allocator);
    }
  }
#endif

  static void CheckCUDAAllocThreadSafe(const CUDAAllocatorMap& allocators) {
    for (auto& place_pair : allocators) {
      for (auto& stream_pair : place_pair.second) {
        PADDLE_ENFORCE_EQ(stream_pair.second->IsAllocThreadSafe(), true,
                          platform::errors::InvalidArgument(
                              "Public allocators must be thread safe"));
      }
    }
  }
#endif

#ifdef PADDLE_WITH_XPU
  void InitNaiveBestFitXPUAllocator(platform::XPUPlace p) {
    allocators_[p] = std::make_shared<NaiveBestFitAllocator>(p);
  }
#endif

#ifdef PADDLE_WITH_IPU
  void InitNaiveBestFitIPUAllocator(platform::IPUPlace p) {
    allocators_[p] = std::make_shared<NaiveBestFitAllocator>(p);
  }
#endif

#ifdef PADDLE_WITH_MLU
  void InitNaiveBestFitMLUAllocator(platform::MLUPlace p) {
    allocators_[p] = std::make_shared<NaiveBestFitAllocator>(p);
  }
#endif

#ifdef PADDLE_WITH_ASCEND_CL
  void InitNaiveBestFitNPUAllocator(platform::NPUPlace p) {
    allocators_[p] = std::make_shared<NaiveBestFitAllocator>(p);
  }

  void InitNaiveBestFitNPUPinnedAllocator() {
    allocators_[platform::NPUPinnedPlace()] =
        std::make_shared<paddle::memory::allocation::NPUPinnedAllocator>();
  }
#endif

#ifdef PADDLE_WITH_CUSTOM_DEVICE
  void InitNaiveBestFitCustomDeviceAllocator(platform::CustomPlace p) {
    allocators_[p] = std::make_shared<NaiveBestFitAllocator>(p);
  }

  void InitAutoGrowthCustomDeviceAllocator(platform::CustomPlace p,
                                           bool allow_free_idle_chunk) {
    auto custom_allocator =
        std::make_shared<paddle::memory::allocation::CustomAllocator>(p);
    allocators_[p] = std::make_shared<AutoGrowthBestFitAllocator>(
        custom_allocator, phi::DeviceManager::GetMinChunkSize(p),
        allow_free_idle_chunk);
  }
#endif

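  // System allocators bypass the configured strategy and are selected when
  // FLAGS_use_system_allocator is set (mainly in unittests).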
  void InitSystemAllocators() {
    if (!system_allocators_.empty()) return;
    system_allocators_[platform::CPUPlace()] = std::make_shared<CPUAllocator>();
#ifdef PADDLE_WITH_XPU
    int device_count = platform::GetXPUDeviceCount();
    for (int i = 0; i < device_count; ++i) {
      platform::XPUPlace p(i);
      system_allocators_[p] = std::make_shared<NaiveBestFitAllocator>(p);
    }
#endif
#ifdef PADDLE_WITH_IPU
    int device_count = platform::GetIPUDeviceCount();
    for (int i = 0; i < device_count; ++i) {
      platform::IPUPlace p(i);
      system_allocators_[p] = std::make_shared<NaiveBestFitAllocator>(p);
    }
#endif
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
    system_allocators_[platform::CUDAPinnedPlace()] =
        std::make_shared<CPUPinnedAllocator>();
    int device_count = platform::GetGPUDeviceCount();
    for (int i = 0; i < device_count; ++i) {
      platform::CUDAPlace p(i);
      system_allocators_[p] = CreateCUDAAllocator(p);
    }
#endif
#ifdef PADDLE_WITH_MLU
    int device_count = platform::GetMLUDeviceCount();
    for (int i = 0; i < device_count; ++i) {
      platform::MLUPlace p(i);
      system_allocators_[p] = std::make_shared<NaiveBestFitAllocator>(p);
    }
#endif
  }

  void InitZeroSizeAllocators() {
    if (!zero_size_allocators_.empty()) return;
    std::vector<platform::Place> places;
    places.emplace_back(platform::CPUPlace());
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
    int device_count = platform::GetGPUDeviceCount();
    for (int dev_id = 0; dev_id < device_count; ++dev_id) {
      places.emplace_back(platform::CUDAPlace(dev_id));
    }
    places.emplace_back(platform::CUDAPinnedPlace());
#endif
#ifdef PADDLE_WITH_XPU
    int device_count = platform::GetXPUDeviceCount();
    for (int dev_id = 0; dev_id < device_count; ++dev_id) {
      places.emplace_back(platform::XPUPlace(dev_id));
    }
#endif
#ifdef PADDLE_WITH_ASCEND_CL
    int device_count = platform::GetNPUDeviceCount();
    for (int dev_id = 0; dev_id < device_count; ++dev_id) {
      places.emplace_back(platform::NPUPlace(dev_id));
    }
#endif
#ifdef PADDLE_WITH_IPU
    int device_count = platform::GetIPUDeviceCount();
    for (int dev_id = 0; dev_id < device_count; ++dev_id) {
      places.emplace_back(platform::IPUPlace(dev_id));
    }
#endif
#ifdef PADDLE_WITH_MLU
    int device_count = platform::GetMLUDeviceCount();
    for (int dev_id = 0; dev_id < device_count; ++dev_id) {
      places.emplace_back(platform::MLUPlace(dev_id));
    }
#endif
#ifdef PADDLE_WITH_CUSTOM_DEVICE
    auto device_types = phi::DeviceManager::GetAllCustomDeviceTypes();
    for (const auto& dev_type : device_types) {
      for (size_t dev_id = 0;
           dev_id < phi::DeviceManager::GetDeviceCount(dev_type); dev_id++) {
        places.emplace_back(platform::CustomPlace(dev_type, dev_id));
      }
    }
#endif

    for (auto& p : places) {
      zero_size_allocators_[p] = std::make_shared<ZeroSizeAllocator>(p);
    }
  }

  static void CheckAllocThreadSafe(const AllocatorMap& allocators) {
    for (auto& pair : allocators) {
      PADDLE_ENFORCE_EQ(pair.second->IsAllocThreadSafe(), true,
                        platform::errors::InvalidArgument(
                            "Public allocators must be thread safe"));
    }
  }

  void CheckAllocThreadSafe() const {
    CheckAllocThreadSafe(allocators_);
    CheckAllocThreadSafe(zero_size_allocators_);
    CheckAllocThreadSafe(system_allocators_);
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
    if (FLAGS_use_stream_safe_cuda_allocator) {
      CheckCUDAAllocThreadSafe(cuda_allocators_);
    }
#endif
  }

  // NOTE(Ruibiao): Old single-stream version, will be removed later
  void WrapCUDARetryAllocator(size_t retry_time) {
    PADDLE_ENFORCE_GT(
        retry_time, 0,
        platform::errors::InvalidArgument(
            "Retry time should be larger than 0, but got %d", retry_time));
    for (auto& pair : allocators_) {
      if (platform::is_gpu_place(pair.first)) {
        pair.second = std::make_shared<RetryAllocator>(pair.second, retry_time);
      }
    }
  }

#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
  // a standalone CUDA allocator to support multi-stream GC in new executor
  CUDAAllocatorMap cuda_allocators_;
  std::shared_timed_mutex cuda_allocator_mutex_;
#endif
  AllocatorStrategy strategy_;
  AllocatorMap allocators_;
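  // Static so they are shared by all AllocatorFacadePrivate instances,
  // including the dedicated per-CUDA-Graph memory pools.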
  static AllocatorMap zero_size_allocators_;
  static AllocatorMap system_allocators_;
  bool allow_free_idle_chunk_;
};
AllocatorFacadePrivate::AllocatorMap
    AllocatorFacadePrivate::zero_size_allocators_;
AllocatorFacadePrivate::AllocatorMap AllocatorFacadePrivate::system_allocators_;

// Pimpl. Make interface clean.
AllocatorFacade::AllocatorFacade() : m_(new AllocatorFacadePrivate()) {}
// Deleting m_ may cause a core dump when the Python-side destructor runs in
// conflict with C++ teardown, so m_ is deliberately never freed.
AllocatorFacade::~AllocatorFacade() {}

AllocatorFacade& AllocatorFacade::Instance() {
  static AllocatorFacade* instance = new AllocatorFacade;
  return *instance;
}

AllocatorFacadePrivate* AllocatorFacade::GetPrivate() const {
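  // While this thread is capturing a CUDA graph, redirect every facade call
  // to the dedicated memory pool registered for the capture ID; otherwise
  // fall through to the default pool m_.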
#ifdef PADDLE_WITH_CUDA
  if (UNLIKELY(platform::CUDAGraph::IsThisThreadCapturing())) {
    auto id = platform::CUDAGraph::CapturingID();
    auto iter = cuda_graph_map_.find(id);
    PADDLE_ENFORCE_NE(
        iter, cuda_graph_map_.end(),
        platform::errors::PermissionDenied(
            "No memory pool is prepared for CUDA Graph capturing."));
    VLOG(10) << "Choose CUDA Graph memory pool";
    return iter->second.get();
  }
#endif
  return m_;
}

const std::shared_ptr<Allocator>& AllocatorFacade::GetAllocator(
    const platform::Place& place) {
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
  if (FLAGS_use_stream_safe_cuda_allocator && platform::is_gpu_place(place) &&
      FLAGS_use_system_allocator == false) {
    AllocatorFacadePrivate* m = GetPrivate();
    platform::CUDAPlace cuda_place(place.GetDeviceId());
    return m->GetAllocator(cuda_place, m->GetDefaultStream(cuda_place));
  }
#endif

  return GetPrivate()->GetAllocator(
      place, /* A non-zero num to choose allocator_ */ 1);
}

void* AllocatorFacade::GetBasePtr(
    const std::shared_ptr<phi::Allocation>& allocation) {
  PADDLE_ENFORCE_EQ(GetAllocatorStrategy(), AllocatorStrategy::kAutoGrowth,
                    paddle::platform::errors::Unimplemented(
                        "GetBasePtr() is only implemented for the auto_growth "
                        "strategy, not for allocator strategy: %d",
                        static_cast<int>(GetAllocatorStrategy())));
  PADDLE_ENFORCE_EQ(platform::is_gpu_place(allocation->place()), true,
                    paddle::platform::errors::Unimplemented(
                        "GetBasePtr() is only implemented for CUDAPlace(), "
                        "not for place: %s",
                        allocation->place()));
  return GetPrivate()->GetBasePtr(allocation);
}

#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
const std::shared_ptr<Allocator>& AllocatorFacade::GetAllocator(
    const platform::Place& place, const gpuStream_t& stream) {
  if (FLAGS_use_stream_safe_cuda_allocator && platform::is_gpu_place(place) &&
      FLAGS_use_system_allocator == false) {
    return GetPrivate()->GetAllocator(place, stream,
                                      /*create_if_not_found=*/true);
  }
  return GetPrivate()->GetAllocator(
      place, /* A non-zero num to choose allocator_ */ 1);
}
#endif

const std::shared_ptr<Allocator>& AllocatorFacade::GetZeroAllocator(
    const platform::Place& place) {
  return GetPrivate()->GetAllocator(place, /* zero size */ 0);
}

std::shared_ptr<phi::Allocation> AllocatorFacade::AllocShared(
    const platform::Place& place, size_t size) {
  return std::shared_ptr<phi::Allocation>(Alloc(place, size));
}

AllocationPtr AllocatorFacade::Alloc(const platform::Place& place,
                                     size_t size) {
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
  if (FLAGS_use_stream_safe_cuda_allocator && platform::is_gpu_place(place) &&
      size > 0 && FLAGS_use_system_allocator == false) {
    platform::CUDAPlace cuda_place(place.GetDeviceId());
    phi::Stream default_stream = phi::Stream(reinterpret_cast<phi::StreamId>(
        GetPrivate()->GetDefaultStream(cuda_place)));
    return Alloc(cuda_place, size, default_stream);
  }
#endif
  return GetPrivate()->GetAllocator(place, size)->Allocate(size);
}

uint64_t AllocatorFacade::Release(const platform::Place& place) {
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
  if (FLAGS_use_stream_safe_cuda_allocator && platform::is_gpu_place(place) &&
      FLAGS_use_system_allocator == false) {
    platform::CUDAPlace cuda_place(place.GetDeviceId());
    return Release(cuda_place, GetPrivate()->GetDefaultStream(cuda_place));
  }
#endif
  return GetPrivate()
      ->GetAllocator(place, /* A non-zero num to choose allocator_ */ 1)
      ->Release(place);
}

std::shared_ptr<phi::Allocation> AllocatorFacade::AllocShared(
    const platform::Place& place, size_t size, const phi::Stream& stream) {
  PADDLE_ENFORCE_EQ(
      FLAGS_use_stream_safe_cuda_allocator, true,
      platform::errors::Unimplemented(
          "StreamSafeCUDAAllocator is disabled, you should not call this "
          "multi-stream 'AllocShared' function. To enable it, you can enter "
          "'export FLAGS_use_stream_safe_cuda_allocator=true' in the "
          "terminal."));
  return std::shared_ptr<phi::Allocation>(Alloc(place, size, stream));
}

AllocationPtr AllocatorFacade::Alloc(const platform::Place& place, size_t size,
                                     const phi::Stream& stream) {
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
  PADDLE_ENFORCE_EQ(
      FLAGS_use_stream_safe_cuda_allocator, true,
      platform::errors::Unimplemented(
          "StreamSafeCUDAAllocator is disabled, you should not call this "
          "multi-stream 'Alloc' function. To enable it, you can enter "
          "'export FLAGS_use_stream_safe_cuda_allocator=true' in the "
          "terminal."));

  platform::CUDAPlace p(place.GetDeviceId());
  if (LIKELY(size > 0 && FLAGS_use_system_allocator == false)) {
    gpuStream_t s = reinterpret_cast<gpuStream_t>(stream.id());
    return GetPrivate()
        ->GetAllocator(p, s, /* create_if_not_found = */ true)
        ->Allocate(size);
  } else {
    return GetPrivate()->GetAllocator(p, size)->Allocate(size);
  }
#else
  PADDLE_THROW(platform::errors::PreconditionNotMet("Not compiled with GPU."));
#endif
}

bool AllocatorFacade::InSameStream(
    const std::shared_ptr<phi::Allocation>& allocation,
    const phi::Stream& stream) {
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
  PADDLE_ENFORCE_EQ(
      FLAGS_use_stream_safe_cuda_allocator, true,
      platform::errors::Unimplemented(
          "StreamSafeCUDAAllocator is disabled, you should not call this "
          "multi-stream 'InSameStream' function. To enable it, you can enter "
          "'export FLAGS_use_stream_safe_cuda_allocator=true' in the "
          "terminal."));
  gpuStream_t s = reinterpret_cast<gpuStream_t>(stream.id());
  return s == GetStream(allocation);
#else
  PADDLE_THROW(platform::errors::PreconditionNotMet("Not compiled with GPU."));
#endif
}

#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
uint64_t AllocatorFacade::Release(const platform::CUDAPlace& place,
                                  const gpuStream_t& stream) {
  PADDLE_ENFORCE_EQ(
      FLAGS_use_stream_safe_cuda_allocator, true,
      platform::errors::Unimplemented(
          "StreamSafeCUDAAllocator is disabled, you should not call this "
          "multi-stream 'Release' function. To enable it, you can enter "
          "'export FLAGS_use_stream_safe_cuda_allocator=true' in the "
          "terminal."));
  return GetPrivate()->GetAllocator(place, stream)->Release(place);
}

void AllocatorFacade::RecordStream(std::shared_ptr<phi::Allocation> allocation,
                                   const gpuStream_t& stream) {
  PADDLE_ENFORCE_EQ(
      FLAGS_use_stream_safe_cuda_allocator, true,
      platform::errors::Unimplemented(
          "StreamSafeCUDAAllocator is disabled, you should not call this "
          "'RecordStream' function. To enable it, you can enter "
          "'export FLAGS_use_stream_safe_cuda_allocator=true' in the "
          "terminal."));
  GetPrivate()->RecordStream(allocation, stream);
}

const gpuStream_t& AllocatorFacade::GetStream(
    const std::shared_ptr<phi::Allocation>& allocation) const {
  PADDLE_ENFORCE_EQ(
      FLAGS_use_stream_safe_cuda_allocator, true,
      platform::errors::Unimplemented(
          "StreamSafeCUDAAllocator is disabled, you should not call this "
          "'GetStream' function. To enable it, you can enter "
          "'export FLAGS_use_stream_safe_cuda_allocator=true' in the "
          "terminal."));
  return GetPrivate()->GetStream(allocation);
}

#ifdef PADDLE_WITH_CUDA
void AllocatorFacade::PrepareMemoryPoolForCUDAGraph(CUDAGraphID id) {
  PADDLE_ENFORCE_EQ(GetAllocatorStrategy(), AllocatorStrategy::kAutoGrowth,
                    platform::errors::InvalidArgument(
                        "CUDA Graph is only supported when the "
                        "FLAGS_allocator_strategy=\"auto_growth\", but got "
                        "FLAGS_allocator_strategy=\"%s\"",
                        FLAGS_allocator_strategy));
  auto& allocator = cuda_graph_map_[id];
  PADDLE_ENFORCE_EQ(
      allocator.get(), nullptr,
      platform::errors::InvalidArgument(
          "The memory pool of the CUDA Graph with ID %d have been prepared.",
          id));
  allocator.reset(new AllocatorFacadePrivate(/*allow_free_idle_chunk=*/false));
  VLOG(10) << "Prepare memory pool for CUDA Graph with ID " << id;
}

void AllocatorFacade::RemoveMemoryPoolOfCUDAGraph(CUDAGraphID id) {
  auto iter = cuda_graph_map_.find(id);
  PADDLE_ENFORCE_NE(iter, cuda_graph_map_.end(),
                    platform::errors::InvalidArgument(
                        "Cannot find CUDA Graph with ID = %d", id));
  cuda_graph_map_.erase(iter);
  VLOG(10) << "Remove memory pool of CUDA Graph with ID " << id;
}
#endif
#endif
}  // namespace allocation
}  // namespace memory
}  // namespace paddle