// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/fluid/memory/allocation/allocator_facade.h"

#include "gflags/gflags.h"
#include "paddle/fluid/memory/allocation/aligned_allocator.h"
#include "paddle/fluid/memory/allocation/allocator.h"
#include "paddle/fluid/memory/allocation/allocator_strategy.h"
#include "paddle/fluid/memory/allocation/auto_growth_best_fit_allocator.h"
#include "paddle/fluid/memory/allocation/cpu_allocator.h"
#include "paddle/fluid/memory/allocation/naive_best_fit_allocator.h"
#include "paddle/fluid/memory/allocation/retry_allocator.h"
#include "paddle/fluid/platform/enforce.h"
#include "paddle/fluid/platform/place.h"

#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
#include <shared_mutex>
#include "paddle/fluid/memory/allocation/cuda_allocator.h"
#include "paddle/fluid/memory/allocation/pinned_allocator.h"
#include "paddle/fluid/memory/allocation/stream_safe_cuda_allocator.h"
#include "paddle/fluid/memory/allocation/thread_local_allocator.h"
#include "paddle/fluid/platform/device/gpu/gpu_info.h"
#include "paddle/fluid/platform/device_context.h"

#ifdef PADDLE_WITH_CUDA
#include "paddle/fluid/platform/device/gpu/cuda/cuda_graph.h"
#endif

#if CUDA_VERSION >= 10020
#include "paddle/fluid/memory/allocation/cuda_virtual_mem_allocator.h"
#include "paddle/fluid/memory/allocation/virtual_memory_auto_growth_best_fit_allocator.h"
#include "paddle/fluid/platform/dynload/cuda_driver.h"
#endif
#endif

#ifdef PADDLE_WITH_XPU
#include "paddle/fluid/platform/device/xpu/xpu_info.h"
#endif

#ifdef PADDLE_WITH_ASCEND_CL
#include "paddle/fluid/memory/allocation/npu_pinned_allocator.h"
#endif

#ifdef PADDLE_WITH_IPU
#include "paddle/fluid/platform/device/ipu/ipu_info.h"
#endif

#ifdef PADDLE_WITH_MLU
#include "paddle/fluid/platform/device/mlu/mlu_info.h"
#endif

PADDLE_DEFINE_EXPORTED_int64(
    gpu_allocator_retry_time, 10000,
    "The retry time (milliseconds) when the allocator fails "
    "to allocate memory. No retry if this value is not greater than 0");

PADDLE_DEFINE_EXPORTED_bool(
    use_system_allocator, false,
    "Whether to use the system allocator to allocate CPU and GPU memory. "
    "Only used for unittests.");

PADDLE_DEFINE_EXPORTED_bool(use_virtual_memory_auto_growth, false,
                            "Use VirtualMemoryAutoGrowthBestFitAllocator.");

// NOTE(Ruibiao): This flag is only kept for compatibility with the old
// single-stream CUDA allocator. It will be removed once
// StreamSafeCUDAAllocator has been fully tested.
PADDLE_DEFINE_EXPORTED_bool(use_stream_safe_cuda_allocator, false,
                            "Enable StreamSafeCUDAAllocator");

DECLARE_string(allocator_strategy);
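
// A minimal usage sketch (illustrative, not part of this file): the
// stream-safe multi-stream path is opted into via environment flags before
// the process starts, e.g.
//   export FLAGS_allocator_strategy=auto_growth
//   export FLAGS_use_stream_safe_cuda_allocator=true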

namespace paddle {
namespace memory {
namespace allocation {

#ifdef PADDLE_WITH_CUDA
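// CUDAGraphAllocator decorates an existing allocator for CUDA graph capture:
// every allocation it hands out keeps a shared reference back to this
// allocator, so the graph's memory pool stays alive until all of its
// allocations have been freed.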
class CUDAGraphAllocator
    : public Allocator,
      public std::enable_shared_from_this<CUDAGraphAllocator> {
 private:
  class PrivateAllocation : public Allocation {
   public:
    PrivateAllocation(CUDAGraphAllocator* allocator,
                      DecoratedAllocationPtr underlying_allocation)
        : Allocation(
              underlying_allocation->ptr(), underlying_allocation->base_ptr(),
              underlying_allocation->size(), underlying_allocation->place()),
          allocator_(allocator->shared_from_this()),
          underlying_allocation_(std::move(underlying_allocation)) {}

   private:
    std::shared_ptr<Allocator> allocator_;
    DecoratedAllocationPtr underlying_allocation_;
  };

  explicit CUDAGraphAllocator(const std::shared_ptr<Allocator>& allocator)
      : underlying_allocator_(allocator) {}

 public:
  static std::shared_ptr<Allocator> Create(
      const std::shared_ptr<Allocator>& allocator) {
    return std::shared_ptr<Allocator>(new CUDAGraphAllocator(allocator));
  }

 protected:
  pten::Allocation* AllocateImpl(size_t size) {
    VLOG(10) << "Allocate " << size << " for CUDA Graph";
    return new PrivateAllocation(this,
                                 static_unique_ptr_cast<Allocation>(
                                     underlying_allocator_->Allocate(size)));
  }

  void FreeImpl(pten::Allocation* allocation) {
    VLOG(10) << "delete for CUDA Graph";
    delete allocation;
  }

 private:
  std::shared_ptr<Allocator> underlying_allocator_;
};
#endif

class AllocatorFacadePrivate {
 public:
  using AllocatorMap = std::map<platform::Place, std::shared_ptr<Allocator>>;

#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
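  // Maps each CUDA place to the allocators created per stream; this backs
  // the stream-safe multi-stream path below.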
  using CUDAAllocatorMap =
      std::map<platform::CUDAPlace,
               std::map<gpuStream_t, std::shared_ptr<Allocator>>>;
#endif

  explicit AllocatorFacadePrivate(bool allow_free_idle_chunk = true) {
    strategy_ = GetAllocatorStrategy();
    switch (strategy_) {
      case AllocatorStrategy::kNaiveBestFit: {
        InitNaiveBestFitCPUAllocator();
#ifdef PADDLE_WITH_IPU
        for (int dev_id = 0; dev_id < platform::GetIPUDeviceCount(); ++dev_id) {
          InitNaiveBestFitIPUAllocator(platform::IPUPlace(dev_id));
        }
#endif
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
        PADDLE_ENFORCE_EQ(
            FLAGS_use_stream_safe_cuda_allocator, false,
            paddle::platform::errors::Unimplemented(
                "StreamSafeCUDAAllocator is only implemented for the "
                "auto_growth strategy and does not support the "
                "naive_best_fit strategy"));

        for (int dev_id = 0; dev_id < platform::GetGPUDeviceCount(); ++dev_id) {
          InitNaiveBestFitCUDAAllocator(platform::CUDAPlace(dev_id));
        }
        InitNaiveBestFitCUDAPinnedAllocator();
#endif
#ifdef PADDLE_WITH_XPU
        for (int dev_id = 0; dev_id < platform::GetXPUDeviceCount(); ++dev_id) {
          InitNaiveBestFitXPUAllocator(platform::XPUPlace(dev_id));
        }
#endif
#ifdef PADDLE_WITH_ASCEND_CL
        for (int dev_id = 0; dev_id < platform::GetNPUDeviceCount(); ++dev_id) {
          InitNaiveBestFitNPUAllocator(platform::NPUPlace(dev_id));
        }
        InitNaiveBestFitNPUPinnedAllocator();
#endif
#ifdef PADDLE_WITH_MLU
        for (int dev_id = 0; dev_id < platform::GetMLUDeviceCount(); ++dev_id) {
          InitNaiveBestFitMLUAllocator(platform::MLUPlace(dev_id));
        }
#endif
        break;
      }

      case AllocatorStrategy::kAutoGrowth: {
        InitNaiveBestFitCPUAllocator();
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
        allow_free_idle_chunk_ = allow_free_idle_chunk;
        if (FLAGS_use_stream_safe_cuda_allocator) {
          for (int dev_id = 0; dev_id < platform::GetGPUDeviceCount();
               ++dev_id) {
            InitStreamSafeCUDAAllocator(platform::CUDAPlace(dev_id), nullptr);
          }
        } else {
          for (int dev_id = 0; dev_id < platform::GetGPUDeviceCount();
               ++dev_id) {
            InitAutoGrowthCUDAAllocator(platform::CUDAPlace(dev_id),
                                        allow_free_idle_chunk_);
          }
        }
        InitNaiveBestFitCUDAPinnedAllocator();
#endif
#ifdef PADDLE_WITH_XPU
        for (int dev_id = 0; dev_id < platform::GetXPUDeviceCount(); ++dev_id) {
          InitNaiveBestFitXPUAllocator(platform::XPUPlace(dev_id));
        }
#endif
#ifdef PADDLE_WITH_IPU
        for (int dev_id = 0; dev_id < platform::GetIPUDeviceCount(); ++dev_id) {
          InitNaiveBestFitIPUAllocator(platform::IPUPlace(dev_id));
        }
#endif
#ifdef PADDLE_WITH_MLU
        for (int dev_id = 0; dev_id < platform::GetMLUDeviceCount(); ++dev_id) {
          InitNaiveBestFitMLUAllocator(platform::MLUPlace(dev_id));
        }
#endif
        break;
      }
      case AllocatorStrategy::kThreadLocal: {
        InitNaiveBestFitCPUAllocator();
#ifdef PADDLE_WITH_XPU
        for (int dev_id = 0; dev_id < platform::GetXPUDeviceCount(); ++dev_id) {
          InitNaiveBestFitXPUAllocator(platform::XPUPlace(dev_id));
        }
#endif
#ifdef PADDLE_WITH_IPU
        for (int dev_id = 0; dev_id < platform::GetIPUDeviceCount(); ++dev_id) {
          InitNaiveBestFitIPUAllocator(platform::IPUPlace(dev_id));
        }
#endif
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
        PADDLE_ENFORCE_EQ(
            FLAGS_use_stream_safe_cuda_allocator, false,
            paddle::platform::errors::Unimplemented(
                "StreamSafeCUDAAllocator is only implemented for the "
                "auto_growth strategy and does not support the "
                "thread_local strategy"));

        for (int dev_id = 0; dev_id < platform::GetGPUDeviceCount(); ++dev_id) {
          InitThreadLocalCUDAAllocator(platform::CUDAPlace(dev_id));
        }
        InitNaiveBestFitCUDAPinnedAllocator();
#endif
#ifdef PADDLE_WITH_MLU
        for (int dev_id = 0; dev_id < platform::GetMLUDeviceCount(); ++dev_id) {
          InitNaiveBestFitMLUAllocator(platform::MLUPlace(dev_id));
        }
#endif
        break;
      }

      default: {
        PADDLE_THROW(platform::errors::InvalidArgument(
            "Unsupported allocator strategy: %d", static_cast<int>(strategy_)));
      }
    }
    InitZeroSizeAllocators();
    InitSystemAllocators();

    if (FLAGS_gpu_allocator_retry_time > 0) {
      WrapCUDARetryAllocator(FLAGS_gpu_allocator_retry_time);
    }

    CheckAllocThreadSafe();
  }

  inline const std::shared_ptr<Allocator>& GetAllocator(
      const platform::Place& place, size_t size) {
    VLOG(6) << "GetAllocator"
            << " " << place << " " << size;
    const auto& allocators =
        (size > 0 ? (UNLIKELY(FLAGS_use_system_allocator) ? system_allocators_
                                                          : GetAllocatorMap())
                  : zero_size_allocators_);
    auto iter = allocators.find(place);
    PADDLE_ENFORCE_NE(iter, allocators.end(),
                      platform::errors::NotFound(
                          "No allocator found for the place, %s", place));
    return iter->second;
  }

  void* GetBasePtr(const std::shared_ptr<pten::Allocation>& allocation) {
    return static_cast<Allocation*>(allocation.get())->base_ptr();
  }

#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
  bool HasCUDAAllocator(const platform::CUDAPlace& place,
                        const gpuStream_t& stream) {
    auto it = cuda_allocators_.find(place);
    if (it == cuda_allocators_.end()) {
      return false;
    }
    const std::map<gpuStream_t, std::shared_ptr<Allocator>>& allocator_map =
        it->second;
    return allocator_map.find(stream) != allocator_map.end();
  }

  // Double-checked lookup: take a shared lock for the common case where the
  // allocator already exists, and only take the unique lock to create a
  // missing (place, stream) entry.
  const std::shared_ptr<Allocator>& GetAllocator(
      const platform::CUDAPlace& place, const gpuStream_t& stream,
      bool create_if_not_found = false) {
    {  // shared_lock_guard
      std::shared_lock<std::shared_timed_mutex> lock_guard(
          cuda_allocator_mutex_);
      if (LIKELY(HasCUDAAllocator(place, stream))) {
        return cuda_allocators_[place][stream];
      } else {
        PADDLE_ENFORCE_NE(create_if_not_found, false,
                          platform::errors::NotFound(
                              "No allocator found for stream %s in place %s "
                              "with create_if_not_found = false",
                              stream, place));
      }
    }

    {  // unique_lock_guard
      std::unique_lock<std::shared_timed_mutex> lock_guard(
          cuda_allocator_mutex_);
      InitStreamSafeCUDAAllocator(place, stream);
      return cuda_allocators_[place][stream];
    }
  }
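
  // The default stream of a place is taken from its CUDADeviceContext, so
  // facade calls that do not pass an explicit stream end up on the device
  // context's stream.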

  gpuStream_t GetDefaultStream(const platform::CUDAPlace& place) {
    platform::DeviceContextPool& pool = platform::DeviceContextPool::Instance();
    return static_cast<platform::CUDADeviceContext*>(pool.Get(place))->stream();
  }

  void RecordStream(std::shared_ptr<pten::Allocation> allocation,
                    const gpuStream_t& stream) {
    if (allocation->size() == 0) {
      return;
    }

    StreamSafeCUDAAllocation* stream_safe_cuda_allocation =
        dynamic_cast<StreamSafeCUDAAllocation*>(allocation.get());
    PADDLE_ENFORCE_NOT_NULL(stream_safe_cuda_allocation,
                            platform::errors::InvalidArgument(
                                "Failed to dynamically cast %p from "
                                "Allocation* to StreamSafeCUDAAllocation*",
                                allocation.get()));
    stream_safe_cuda_allocation->RecordStream(stream);
  }

  const gpuStream_t& GetStream(
      const std::shared_ptr<pten::Allocation>& allocation) const {
    const StreamSafeCUDAAllocation* stream_safe_cuda_allocation =
        dynamic_cast<const StreamSafeCUDAAllocation*>(allocation.get());
    PADDLE_ENFORCE_NOT_NULL(stream_safe_cuda_allocation,
                            platform::errors::InvalidArgument(
                                "Failed to dynamically cast %p from "
                                "Allocation* to StreamSafeCUDAAllocation*",
                                allocation.get()));
    return stream_safe_cuda_allocation->GetOwningStream();
  }

#ifdef PADDLE_WITH_CUDA
  void PrepareMemoryPoolForCUDAGraph(CUDAGraphID id) {
    PADDLE_ENFORCE_EQ(strategy_, AllocatorStrategy::kAutoGrowth,
                      platform::errors::InvalidArgument(
                          "CUDA Graph is only supported when the "
                          "FLAGS_allocator_strategy=\"auto_growth\", but got "
                          "FLAGS_allocator_strategy=\"%s\"",
                          FLAGS_allocator_strategy));
    auto& allocator = cuda_graph_allocator_map_[id];
    PADDLE_ENFORCE_EQ(
        allocator.get(), nullptr,
        platform::errors::InvalidArgument(
            "The memory pool of the CUDA Graph with ID %d has already been "
            "prepared.",
            id));
    allocator.reset(
        new AllocatorFacadePrivate(/*allow_free_idle_chunk=*/false));
    for (auto& item : allocator->allocators_) {
      auto& old_allocator = item.second;
      old_allocator = CUDAGraphAllocator::Create(old_allocator);
    }
    VLOG(10) << "Prepare memory pool for CUDA Graph with ID " << id;
  }

  void RemoveMemoryPoolOfCUDAGraph(CUDAGraphID id) {
    auto iter = cuda_graph_allocator_map_.find(id);
    PADDLE_ENFORCE_NE(iter, cuda_graph_allocator_map_.end(),
                      platform::errors::InvalidArgument(
                          "Cannot find CUDA Graph with ID = %d", id));
    cuda_graph_allocator_map_.erase(iter);
    VLOG(10) << "Remove memory pool of CUDA Graph with ID " << id;
  }
#endif
#endif

 private:
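  // ZeroSizeAllocator satisfies zero-byte requests with an Allocation that
  // holds a null pointer, so callers always receive a valid object without
  // any real memory being reserved.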
  class ZeroSizeAllocator : public Allocator {
   public:
    explicit ZeroSizeAllocator(platform::Place place) : place_(place) {}
    bool IsAllocThreadSafe() const override { return true; }

   protected:
    pten::Allocation* AllocateImpl(size_t size) override {
      return new Allocation(nullptr, 0, place_);
    }
    void FreeImpl(pten::Allocation* allocation) override { delete allocation; }

   private:
    platform::Place place_;
  };

  const AllocatorMap& GetAllocatorMap() {
#ifdef PADDLE_WITH_CUDA
    if (UNLIKELY(platform::CUDAGraph::IsThisThreadCapturing())) {
      auto id = platform::CUDAGraph::CapturingID();
      auto iter = cuda_graph_allocator_map_.find(id);
      PADDLE_ENFORCE_NE(
          iter, cuda_graph_allocator_map_.end(),
          platform::errors::PermissionDenied(
              "No memory pool is prepared for CUDA Graph capturing."));
      VLOG(10) << "Choose CUDA Graph memory pool to allocate memory";
      return iter->second->allocators_;
    } else {
      return allocators_;
    }
#else
    return allocators_;
#endif
  }

  void InitNaiveBestFitCPUAllocator() {
    allocators_[platform::CPUPlace()] =
        std::make_shared<NaiveBestFitAllocator>(platform::CPUPlace());
  }

#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
  void InitNaiveBestFitCUDAPinnedAllocator() {
    allocators_[platform::CUDAPinnedPlace()] =
        std::make_shared<NaiveBestFitAllocator>(platform::CUDAPinnedPlace());
  }

  void InitStreamSafeCUDAAllocator(platform::CUDAPlace p, gpuStream_t stream) {
    PADDLE_ENFORCE_EQ(
        strategy_, AllocatorStrategy::kAutoGrowth,
        platform::errors::Unimplemented(
            "Only the auto-growth strategy is supported for "
            "StreamSafeCUDAAllocator; allocator strategy %d is unsupported "
            "for multi-stream",
            static_cast<int>(strategy_)));
    if (LIKELY(!HasCUDAAllocator(p, stream))) {
      VLOG(8) << "Init CUDA allocator for stream " << stream << " in place "
              << p;
      InitAutoGrowthCUDAAllocator(p, stream);
      WrapStreamSafeCUDAAllocator(p, stream);
      WrapCUDARetryAllocator(p, stream, FLAGS_gpu_allocator_retry_time);
    }
  }

  void InitNaiveBestFitCUDAAllocator(platform::CUDAPlace p) {
    allocators_[p] = std::make_shared<NaiveBestFitAllocator>(p);
  }

  void InitAutoGrowthCUDAAllocator(platform::CUDAPlace p, gpuStream_t stream) {
#if defined(PADDLE_WITH_HIP)
    auto cuda_allocator = std::make_shared<CUDAAllocator>(p);
    cuda_allocators_[p][stream] = std::make_shared<AutoGrowthBestFitAllocator>(
        cuda_allocator, platform::GpuMinChunkSize(), 0, allow_free_idle_chunk_);
#endif

#if defined(PADDLE_WITH_CUDA)
#if CUDA_VERSION >= 10020
    CUdevice device;
    int val;
    try {
      PADDLE_ENFORCE_GPU_SUCCESS(
          paddle::platform::dynload::cuDeviceGet(&device, p.GetDeviceId()));

      PADDLE_ENFORCE_GPU_SUCCESS(
          paddle::platform::dynload::cuDeviceGetAttribute(
              &val, CU_DEVICE_ATTRIBUTE_VIRTUAL_ADDRESS_MANAGEMENT_SUPPORTED,
              device));
    } catch (...) {
      val = 0;
    }

    if (val > 0 && FLAGS_use_virtual_memory_auto_growth) {
      auto cuda_allocator = std::make_shared<CUDAVirtualMemAllocator>(p);
      cuda_allocators_[p][stream] =
          std::make_shared<VirtualMemoryAutoGrowthBestFitAllocator>(
              cuda_allocator, platform::GpuMinChunkSize(), p);
    } else {
      auto cuda_allocator = std::make_shared<CUDAAllocator>(p);
      cuda_allocators_[p][stream] =
          std::make_shared<AutoGrowthBestFitAllocator>(
              cuda_allocator, platform::GpuMinChunkSize(),
              allow_free_idle_chunk_);
    }
#else
    auto cuda_allocator = std::make_shared<CUDAAllocator>(p);
    auto alignment = platform::GpuMinChunkSize();
    bool need_addr_align = true;
    // NOTE: Since the CUDA runtime cannot be forked, calling any CUDA API in
    // a forked subprocess may return cuda error(3), i.e.,
    // cudaErrorInitializationError, even when the CUDAAllocator is only
    // initialized and never actually used. The try-catch block handles the
    // case where GetDeviceProperties() fails in a subprocess (for example,
    // in a dataloader with num_worker > 0).
    try {
      const auto& prop = platform::GetDeviceProperties(p.GetDeviceId());
      need_addr_align = prop.textureAlignment < alignment;
      VLOG(4) << "GetDeviceProperties ok, textureAlignment: "
              << prop.textureAlignment
              << ", set need_addr_align=" << need_addr_align;
    } catch (...) {
      need_addr_align = true;
      VLOG(4) << "GetDeviceProperties failed, set need_addr_align=true";
    }
    // The address returned is aligned already,
    // ref:
    // https://stackoverflow.com/questions/14082964/cuda-alignment-256bytes-seriously/14083295#14083295
    std::shared_ptr<Allocator> underlying_allocator{nullptr};
    if (need_addr_align) {
      VLOG(10) << "use AlignedAllocator with alignment: " << alignment;
      underlying_allocator =
          std::make_shared<AlignedAllocator>(cuda_allocator, alignment);
    } else {
      VLOG(10) << "not use AlignedAllocator with alignment: " << alignment;
      underlying_allocator = cuda_allocator;
    }

    cuda_allocators_[p][stream] = std::make_shared<AutoGrowthBestFitAllocator>(
        underlying_allocator, alignment, 0, allow_free_idle_chunk_);
#endif
#endif
  }

  // NOTE(Ruibiao): Old single-stream version, will be removed later
  void InitAutoGrowthCUDAAllocator(platform::CUDAPlace p,
                                   bool allow_free_idle_chunk) {
#if defined(PADDLE_WITH_HIP)
    auto cuda_allocator = std::make_shared<CUDAAllocator>(p);
    allocators_[p] = std::make_shared<AutoGrowthBestFitAllocator>(
        cuda_allocator, platform::GpuMinChunkSize(), allow_free_idle_chunk);
#endif

#if defined(PADDLE_WITH_CUDA)
#if CUDA_VERSION >= 10020
    CUdevice device;
    int val;
    try {
      PADDLE_ENFORCE_GPU_SUCCESS(
          paddle::platform::dynload::cuDeviceGet(&device, p.GetDeviceId()));

      PADDLE_ENFORCE_GPU_SUCCESS(
          paddle::platform::dynload::cuDeviceGetAttribute(
              &val, CU_DEVICE_ATTRIBUTE_VIRTUAL_ADDRESS_MANAGEMENT_SUPPORTED,
              device));
    } catch (...) {
      val = 0;
    }

    if (val > 0 && FLAGS_use_virtual_memory_auto_growth) {
      auto cuda_allocator = std::make_shared<CUDAVirtualMemAllocator>(p);
      allocators_[p] =
          std::make_shared<VirtualMemoryAutoGrowthBestFitAllocator>(
              cuda_allocator, platform::GpuMinChunkSize(), p);
    } else {
      auto cuda_allocator = std::make_shared<CUDAAllocator>(p);
      allocators_[p] = std::make_shared<AutoGrowthBestFitAllocator>(
          cuda_allocator, platform::GpuMinChunkSize(), allow_free_idle_chunk);
    }

#else
    auto cuda_allocator = std::make_shared<CUDAAllocator>(p);
    auto alignment = platform::GpuMinChunkSize();
    bool need_addr_align = true;
    // NOTE: Since the CUDA runtime cannot be forked, calling any CUDA API in
    // a forked subprocess may return cuda error(3), i.e.,
    // cudaErrorInitializationError, even when the CUDAAllocator is only
    // initialized and never actually used. The try-catch block handles the
    // case where GetDeviceProperties() fails in a subprocess (for example,
    // in a dataloader with num_worker > 0).
    try {
      const auto& prop = platform::GetDeviceProperties(p.GetDeviceId());
      need_addr_align = prop.textureAlignment < alignment;
      VLOG(4) << "GetDeviceProperties ok, textureAlignment: "
              << prop.textureAlignment
              << ", set need_addr_align=" << need_addr_align;
    } catch (...) {
      need_addr_align = true;
      VLOG(4) << "GetDeviceProperties failed, set need_addr_align=true";
    }
    // The address returned is aligned already,
    // ref:
    // https://stackoverflow.com/questions/14082964/cuda-alignment-256bytes-seriously/14083295#14083295
    std::shared_ptr<Allocator> underlying_allocator{nullptr};
    if (need_addr_align) {
      VLOG(10) << "use AlignedAllocator with alignment: " << alignment;
      underlying_allocator =
          std::make_shared<AlignedAllocator>(cuda_allocator, alignment);
    } else {
      VLOG(10) << "not use AlignedAllocator with alignment: " << alignment;
      underlying_allocator = cuda_allocator;
    }
    allocators_[p] = std::make_shared<AutoGrowthBestFitAllocator>(
        underlying_allocator, alignment, 0, allow_free_idle_chunk);
#endif
#endif
  }

  void InitThreadLocalCUDAAllocator(platform::CUDAPlace p) {
    allocators_[p] = std::make_shared<ThreadLocalCUDAAllocator>(p);
  }

  void WrapStreamSafeCUDAAllocator(platform::CUDAPlace p, gpuStream_t stream) {
    const std::shared_ptr<Allocator>& underlying_allocator =
        cuda_allocators_[p][stream];
    cuda_allocators_[p][stream] = std::make_shared<StreamSafeCUDAAllocator>(
        underlying_allocator, p, stream);
  }

  void WrapCUDARetryAllocator(platform::CUDAPlace p, gpuStream_t stream,
                              size_t retry_time) {
    PADDLE_ENFORCE_GT(
        retry_time, 0,
        platform::errors::InvalidArgument(
            "Retry time should be larger than 0, but got %d", retry_time));
    // Bind a reference so the RetryAllocator wrapper is written back into
    // cuda_allocators_ instead of into a discarded local copy.
    std::shared_ptr<Allocator>& allocator = cuda_allocators_[p][stream];
    allocator = std::make_shared<RetryAllocator>(allocator, retry_time);
  }

  static void CheckCUDAAllocThreadSafe(const CUDAAllocatorMap& allocators) {
    for (auto& place_pair : allocators) {
      for (auto& stream_pair : place_pair.second) {
        PADDLE_ENFORCE_EQ(stream_pair.second->IsAllocThreadSafe(), true,
                          platform::errors::InvalidArgument(
                              "Public allocators must be thread safe"));
      }
    }
  }
#endif

#ifdef PADDLE_WITH_XPU
  void InitNaiveBestFitXPUAllocator(platform::XPUPlace p) {
    allocators_[p] = std::make_shared<NaiveBestFitAllocator>(p);
  }
#endif

#ifdef PADDLE_WITH_IPU
  void InitNaiveBestFitIPUAllocator(platform::IPUPlace p) {
    allocators_[p] = std::make_shared<NaiveBestFitAllocator>(p);
  }
#endif

#ifdef PADDLE_WITH_MLU
  void InitNaiveBestFitMLUAllocator(platform::MLUPlace p) {
    allocators_[p] = std::make_shared<NaiveBestFitAllocator>(p);
  }
#endif

#ifdef PADDLE_WITH_ASCEND_CL
  void InitNaiveBestFitNPUAllocator(platform::NPUPlace p) {
    allocators_[p] = std::make_shared<NaiveBestFitAllocator>(p);
  }

  void InitNaiveBestFitNPUPinnedAllocator() {
    allocators_[platform::NPUPinnedPlace()] =
        std::make_shared<paddle::memory::allocation::NPUPinnedAllocator>();
  }
#endif

  void InitSystemAllocators() {
    if (!system_allocators_.empty()) return;
    system_allocators_[platform::CPUPlace()] = std::make_shared<CPUAllocator>();
#ifdef PADDLE_WITH_XPU
    int device_count = platform::GetXPUDeviceCount();
    for (int i = 0; i < device_count; ++i) {
      platform::XPUPlace p(i);
      system_allocators_[p] = std::make_shared<NaiveBestFitAllocator>(p);
    }
#endif
#ifdef PADDLE_WITH_IPU
    int device_count = platform::GetIPUDeviceCount();
    for (int i = 0; i < device_count; ++i) {
      platform::IPUPlace p(i);
      system_allocators_[p] = std::make_shared<NaiveBestFitAllocator>(p);
    }
#endif
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
    system_allocators_[platform::CUDAPinnedPlace()] =
        std::make_shared<CPUPinnedAllocator>();
    int device_count = platform::GetGPUDeviceCount();
    for (int i = 0; i < device_count; ++i) {
      platform::CUDAPlace p(i);
      system_allocators_[p] = std::make_shared<CUDAAllocator>(p);
    }
#endif
#ifdef PADDLE_WITH_MLU
    int device_count = platform::GetMLUDeviceCount();
    for (int i = 0; i < device_count; ++i) {
      platform::MLUPlace p(i);
      system_allocators_[p] = std::make_shared<NaiveBestFitAllocator>(p);
    }
#endif
  }

  void InitZeroSizeAllocators() {
    if (!zero_size_allocators_.empty()) return;
    std::vector<platform::Place> places;
    places.emplace_back(platform::CPUPlace());
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
    int device_count = platform::GetGPUDeviceCount();
    for (int dev_id = 0; dev_id < device_count; ++dev_id) {
      places.emplace_back(platform::CUDAPlace(dev_id));
    }
    places.emplace_back(platform::CUDAPinnedPlace());
#endif
#ifdef PADDLE_WITH_XPU
    int device_count = platform::GetXPUDeviceCount();
    for (int dev_id = 0; dev_id < device_count; ++dev_id) {
      places.emplace_back(platform::XPUPlace(dev_id));
    }
#endif
#ifdef PADDLE_WITH_ASCEND_CL
    int device_count = platform::GetNPUDeviceCount();
    for (int dev_id = 0; dev_id < device_count; ++dev_id) {
      places.emplace_back(platform::NPUPlace(dev_id));
    }
#endif
#ifdef PADDLE_WITH_IPU
    int device_count = platform::GetIPUDeviceCount();
    for (int dev_id = 0; dev_id < device_count; ++dev_id) {
      places.emplace_back(platform::IPUPlace(dev_id));
    }
#endif
#ifdef PADDLE_WITH_MLU
    int device_count = platform::GetMLUDeviceCount();
    for (int dev_id = 0; dev_id < device_count; ++dev_id) {
      places.emplace_back(platform::MLUPlace(dev_id));
    }
#endif

    for (auto& p : places) {
      zero_size_allocators_[p] = std::make_shared<ZeroSizeAllocator>(p);
    }
  }

  static void CheckAllocThreadSafe(const AllocatorMap& allocators) {
    for (auto& pair : allocators) {
      PADDLE_ENFORCE_EQ(pair.second->IsAllocThreadSafe(), true,
                        platform::errors::InvalidArgument(
                            "Public allocators must be thread safe"));
    }
  }

  void CheckAllocThreadSafe() const {
    CheckAllocThreadSafe(allocators_);
    CheckAllocThreadSafe(zero_size_allocators_);
    CheckAllocThreadSafe(system_allocators_);
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
    if (FLAGS_use_stream_safe_cuda_allocator) {
      CheckCUDAAllocThreadSafe(cuda_allocators_);
    }
#endif
  }

  // NOTE(Ruibiao): Old single-stream version, will be removed later
  void WrapCUDARetryAllocator(size_t retry_time) {
    PADDLE_ENFORCE_GT(
        retry_time, 0,
        platform::errors::InvalidArgument(
            "Retry time should be larger than 0, but got %d", retry_time));
    for (auto& pair : allocators_) {
      if (platform::is_gpu_place(pair.first)) {
        pair.second = std::make_shared<RetryAllocator>(pair.second, retry_time);
      }
    }
  }

#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
  // A standalone map of CUDA allocators to support multi-stream GC in the
  // new executor
  CUDAAllocatorMap cuda_allocators_;
  std::shared_timed_mutex cuda_allocator_mutex_;
#ifdef PADDLE_WITH_CUDA
  std::unordered_map<CUDAGraphID, std::unique_ptr<AllocatorFacadePrivate>>
      cuda_graph_allocator_map_;
#endif
#endif
  AllocatorStrategy strategy_;
  AllocatorMap allocators_;
  static AllocatorMap zero_size_allocators_;
  static AllocatorMap system_allocators_;
  bool allow_free_idle_chunk_;
};
AllocatorFacadePrivate::AllocatorMap
    AllocatorFacadePrivate::zero_size_allocators_;
AllocatorFacadePrivate::AllocatorMap AllocatorFacadePrivate::system_allocators_;

// Pimpl. Make interface clean.
AllocatorFacade::AllocatorFacade() : m_(new AllocatorFacadePrivate()) {}
// Deleting m_ may cause a core dump when the Python destructor conflicts with
// the C++ one, so m_ is deliberately not freed here.
AllocatorFacade::~AllocatorFacade() {}

AllocatorFacade& AllocatorFacade::Instance() {
  static AllocatorFacade instance;
  return instance;
}

const std::shared_ptr<Allocator>& AllocatorFacade::GetAllocator(
    const platform::Place& place) {
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
  if (FLAGS_use_stream_safe_cuda_allocator && platform::is_gpu_place(place) &&
      FLAGS_use_system_allocator == false) {
#ifdef PADDLE_WITH_CUDA
    if (UNLIKELY(platform::CUDAGraph::IsCapturing())) {
      return m_->GetAllocator(place,
                              /* A non-zero num to choose allocator_ */ 1);
    }
#endif

    platform::CUDAPlace cuda_place(place.GetDeviceId());
    return m_->GetAllocator(cuda_place, m_->GetDefaultStream(cuda_place));
  }
#endif

  return m_->GetAllocator(place, /* A non-zero num to choose allocator_ */ 1);
}
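
// A minimal usage sketch (illustrative only):
//   auto& allocator =
//       AllocatorFacade::Instance().GetAllocator(platform::CUDAPlace(0));
//   auto allocation = allocator->Allocate(/*size=*/1024);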

void* AllocatorFacade::GetBasePtr(
    const std::shared_ptr<pten::Allocation>& allocation) {
  PADDLE_ENFORCE_EQ(GetAllocatorStrategy(), AllocatorStrategy::kAutoGrowth,
                    paddle::platform::errors::Unimplemented(
                        "GetBasePtr() is only implemented for the auto_growth "
                        "strategy, and does not support allocator strategy: "
                        "%d",
                        static_cast<int>(GetAllocatorStrategy())));
  PADDLE_ENFORCE_EQ(platform::is_gpu_place(allocation->place()), true,
                    paddle::platform::errors::Unimplemented(
                        "GetBasePtr() is only implemented for CUDAPlace(), "
                        "and does not support place: %s",
                        allocation->place()));
  return m_->GetBasePtr(allocation);
}

#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
const std::shared_ptr<Allocator>& AllocatorFacade::GetAllocator(
    const platform::Place& place, const gpuStream_t& stream) {
  if (FLAGS_use_stream_safe_cuda_allocator && platform::is_gpu_place(place) &&
      FLAGS_use_system_allocator == false) {
#ifdef PADDLE_WITH_CUDA
    if (UNLIKELY(platform::CUDAGraph::IsCapturing())) {
      return m_->GetAllocator(place,
                              /* A non-zero num to choose allocator_ */ 1);
    }
#endif
    return m_->GetAllocator(place, stream, /*create_if_not_found=*/true);
  }
  return m_->GetAllocator(place, /* A non-zero num to choose allocator_ */ 1);
}
#endif

const std::shared_ptr<Allocator>& AllocatorFacade::GetZeroAllocator(
    const platform::Place& place) {
  return m_->GetAllocator(place, /* zero size */ 0);
}

std::shared_ptr<pten::Allocation> AllocatorFacade::AllocShared(
    const platform::Place& place, size_t size) {
  return std::shared_ptr<pten::Allocation>(Alloc(place, size));
}

AllocationPtr AllocatorFacade::Alloc(const platform::Place& place,
                                     size_t size) {
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
  if (FLAGS_use_stream_safe_cuda_allocator && platform::is_gpu_place(place) &&
      size > 0 && FLAGS_use_system_allocator == false) {
#ifdef PADDLE_WITH_CUDA
    if (UNLIKELY(platform::CUDAGraph::IsCapturing())) {
      return m_->GetAllocator(place, size)->Allocate(size);
    }
#endif

    platform::CUDAPlace cuda_place(place.GetDeviceId());
    return Alloc(cuda_place, size, m_->GetDefaultStream(cuda_place));
  }
#endif

  return m_->GetAllocator(place, size)->Allocate(size);
}

uint64_t AllocatorFacade::Release(const platform::Place& place) {
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
  if (FLAGS_use_stream_safe_cuda_allocator && platform::is_gpu_place(place) &&
      FLAGS_use_system_allocator == false) {
#ifdef PADDLE_WITH_CUDA
    if (UNLIKELY(platform::CUDAGraph::IsCapturing())) {
      return m_
          ->GetAllocator(place, /* A non-zero num to choose allocator_ */ 1)
          ->Release(place);
    }
#endif

    platform::CUDAPlace cuda_place(place.GetDeviceId());
    return Release(cuda_place, m_->GetDefaultStream(cuda_place));
  }
#endif
  return m_->GetAllocator(place, /* A non-zero num to choose allocator_ */ 1)
      ->Release(place);
}

std::shared_ptr<pten::Allocation> AllocatorFacade::AllocShared(
    const platform::Place& place, size_t size, const platform::Stream& stream) {
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
  PADDLE_ENFORCE_EQ(
      FLAGS_use_stream_safe_cuda_allocator, true,
      platform::errors::Unimplemented(
          "StreamSafeCUDAAllocator is disabled, you should not call this "
          "multi-stream 'AllocShared' function. To enable it, you can enter "
          "'export FLAGS_use_stream_safe_cuda_allocator=true' in the "
          "terminal."));

#ifdef PADDLE_WITH_CUDA
  if (UNLIKELY(platform::CUDAGraph::IsCapturing())) {
    PADDLE_THROW(platform::errors::Unavailable(
        "Using StreamSafeCUDAAllocator together with CUDAGraphAllocator is "
        "not allowed"));
  }
#endif
  gpuStream_t s = reinterpret_cast<gpuStream_t>(stream.id());
  return std::shared_ptr<pten::Allocation>(Alloc(place, size, s));
#else
  PADDLE_THROW(platform::errors::PreconditionNotMet("Not compiled with GPU."));
#endif
}
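
// A minimal multi-stream usage sketch (illustrative only; `place`, `size`,
// `cuda_stream`, and `other_stream` are assumed to come from the caller, and
// platform::Stream is assumed to wrap a raw stream id as used above):
//   auto allocation = AllocatorFacade::Instance().AllocShared(
//       place, size,
//       platform::Stream(reinterpret_cast<platform::StreamId>(cuda_stream)));
//   AllocatorFacade::Instance().RecordStream(allocation, other_stream);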

bool AllocatorFacade::InSameStream(
    const std::shared_ptr<pten::Allocation>& allocation,
    const platform::Stream& stream) {
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
  PADDLE_ENFORCE_EQ(
      FLAGS_use_stream_safe_cuda_allocator, true,
      platform::errors::Unimplemented(
          "StreamSafeCUDAAllocator is disabled, you should not call this "
          "multi-stream 'InSameStream' function. To enable it, you can enter "
          "'export FLAGS_use_stream_safe_cuda_allocator=true' in the "
          "terminal."));

#ifdef PADDLE_WITH_CUDA
  if (UNLIKELY(platform::CUDAGraph::IsCapturing())) {
    PADDLE_THROW(platform::errors::Unavailable(
        "Using StreamSafeCUDAAllocator together with CUDAGraphAllocator is "
        "not allowed"));
  }
#endif
  gpuStream_t s = reinterpret_cast<gpuStream_t>(stream.id());
  return s == GetStream(allocation);
#else
  PADDLE_THROW(platform::errors::PreconditionNotMet("Not compiled with GPU."));
#endif
}

#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
AllocationPtr AllocatorFacade::Alloc(const platform::Place& place, size_t size,
                                     const gpuStream_t& stream) {
  PADDLE_ENFORCE_EQ(
      FLAGS_use_stream_safe_cuda_allocator, true,
      platform::errors::Unimplemented(
          "StreamSafeCUDAAllocator is disabled, you should not call this "
          "multi-stream 'Alloc' function. To enable it, you can enter "
          "'export FLAGS_use_stream_safe_cuda_allocator=true' in the "
          "terminal."));

#ifdef PADDLE_WITH_CUDA
  if (UNLIKELY(platform::CUDAGraph::IsCapturing())) {
    PADDLE_THROW(platform::errors::Unavailable(
        "Using StreamSafeCUDAAllocator together with CUDAGraphAllocator is "
        "not allowed"));
  }
#endif

  platform::CUDAPlace p(place.GetDeviceId());
  if (LIKELY(size > 0 && FLAGS_use_system_allocator == false)) {
    return m_->GetAllocator(p, stream, /* create_if_not_found = */ true)
        ->Allocate(size);
  } else {
    return m_->GetAllocator(p, size)->Allocate(size);
  }
}

uint64_t AllocatorFacade::Release(const platform::CUDAPlace& place,
                                  const gpuStream_t& stream) {
  PADDLE_ENFORCE_EQ(
      FLAGS_use_stream_safe_cuda_allocator, true,
      platform::errors::Unimplemented(
          "StreamSafeCUDAAllocator is disabled, you should not call this "
          "multi-stream 'Release' function. To enable it, you can enter "
          "'export FLAGS_use_stream_safe_cuda_allocator=true' in the "
          "terminal."));

#ifdef PADDLE_WITH_CUDA
  if (UNLIKELY(platform::CUDAGraph::IsCapturing())) {
    PADDLE_THROW(platform::errors::Unavailable(
        "Using StreamSafeCUDAAllocator together with CUDAGraphAllocator is "
        "not allowed"));
  }
#endif

  return m_->GetAllocator(place, stream)->Release(place);
}

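// RecordStream marks `allocation` as also being used on `stream`, so the
// stream-safe allocator defers the actual free until the work already queued
// on every recorded stream has finished.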
void AllocatorFacade::RecordStream(std::shared_ptr<pten::Allocation> allocation,
                                   const gpuStream_t& stream) {
  PADDLE_ENFORCE_EQ(
      FLAGS_use_stream_safe_cuda_allocator, true,
      platform::errors::Unimplemented(
          "StreamSafeCUDAAllocator is disabled, you should not call this "
          "'RecordStream' function. To enable it, you can enter "
          "'export FLAGS_use_stream_safe_cuda_allocator=true' in the "
          "terminal."));

#ifdef PADDLE_WITH_CUDA
  if (UNLIKELY(platform::CUDAGraph::IsCapturing())) {
    PADDLE_THROW(platform::errors::Unavailable(
        "Using StreamSafeCUDAAllocator together with CUDAGraphAllocator is "
        "not allowed"));
  }
#endif

  m_->RecordStream(allocation, stream);
}

const gpuStream_t& AllocatorFacade::GetStream(
    const std::shared_ptr<pten::Allocation>& allocation) const {
  PADDLE_ENFORCE_EQ(
      FLAGS_use_stream_safe_cuda_allocator, true,
      platform::errors::Unimplemented(
          "StreamSafeCUDAAllocator is disabled, you should not call this "
          "'GetStream' function. To enable it, you can enter "
          "'export FLAGS_use_stream_safe_cuda_allocator=true' in the "
          "terminal."));

#ifdef PADDLE_WITH_CUDA
  if (UNLIKELY(platform::CUDAGraph::IsCapturing())) {
    PADDLE_THROW(platform::errors::Unavailable(
        "Using StreamSafeCUDAAllocator together with CUDAGraphAllocator is "
        "not allowed"));
  }
#endif

  return m_->GetStream(allocation);
}

#ifdef PADDLE_WITH_CUDA
void AllocatorFacade::PrepareMemoryPoolForCUDAGraph(CUDAGraphID id) {
  return m_->PrepareMemoryPoolForCUDAGraph(id);
}

void AllocatorFacade::RemoveMemoryPoolOfCUDAGraph(CUDAGraphID id) {
  return m_->RemoveMemoryPoolOfCUDAGraph(id);
}
#endif
#endif
}  // namespace allocation
}  // namespace memory
}  // namespace paddle