// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/fluid/memory/allocation/allocator_facade.h"

#include "gflags/gflags.h"
#include "paddle/fluid/memory/allocation/aligned_allocator.h"
#include "paddle/fluid/memory/allocation/allocator.h"
#include "paddle/fluid/memory/allocation/allocator_strategy.h"
#include "paddle/fluid/memory/allocation/auto_growth_best_fit_allocator.h"
#include "paddle/fluid/memory/allocation/cpu_allocator.h"
#include "paddle/fluid/memory/allocation/naive_best_fit_allocator.h"
#include "paddle/fluid/memory/allocation/retry_allocator.h"
#include "paddle/fluid/memory/allocation/stat_allocator.h"
#include "paddle/fluid/platform/enforce.h"
#include "paddle/fluid/platform/place.h"

#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
#include <shared_mutex>
#include "paddle/fluid/memory/allocation/cuda_allocator.h"
#include "paddle/fluid/memory/allocation/cuda_managed_allocator.h"
#include "paddle/fluid/memory/allocation/pinned_allocator.h"
#include "paddle/fluid/memory/allocation/stream_safe_cuda_allocator.h"
#include "paddle/fluid/memory/allocation/thread_local_allocator.h"
#include "paddle/fluid/platform/device/gpu/gpu_info.h"
#include "paddle/fluid/platform/device_context.h"
#include "paddle/phi/backends/gpu/gpu_context.h"

#ifdef PADDLE_WITH_CUDA
#include "paddle/fluid/platform/device/gpu/cuda/cuda_graph.h"
#endif

#if CUDA_VERSION >= 10020
#include "paddle/fluid/memory/allocation/cuda_virtual_mem_allocator.h"
#include "paddle/fluid/memory/allocation/virtual_memory_auto_growth_best_fit_allocator.h"
#include "paddle/fluid/platform/dynload/cuda_driver.h"
#endif
#endif

#ifdef PADDLE_WITH_XPU
#include "paddle/fluid/platform/device/xpu/xpu_info.h"
#endif

#ifdef PADDLE_WITH_ASCEND_CL
#include "paddle/fluid/memory/allocation/npu_pinned_allocator.h"
#endif

#ifdef PADDLE_WITH_IPU
#include "paddle/fluid/platform/device/ipu/ipu_info.h"
#endif

#ifdef PADDLE_WITH_MLU
#include "paddle/fluid/platform/device/mlu/mlu_info.h"
#endif

#ifdef PADDLE_WITH_CUSTOM_DEVICE
#include "paddle/fluid/memory/allocation/custom_allocator.h"
#include "paddle/fluid/platform/device/device_wrapper.h"
#endif

PADDLE_DEFINE_EXPORTED_int64(
    gpu_allocator_retry_time, 10000,
    "The retry time (in milliseconds) when the allocator fails "
    "to allocate memory. No retry occurs if this value is not greater than 0.");

PADDLE_DEFINE_EXPORTED_bool(
    use_system_allocator, false,
    "Whether to use the system allocator to allocate CPU and GPU memory. "
    "Only used for unittests.");

PADDLE_DEFINE_EXPORTED_bool(use_virtual_memory_auto_growth, false,
                            "Use VirtualMemoryAutoGrowthBestFitAllocator.");

// NOTE(Ruibiao): This flag exists only for compatibility with the old
// single-stream CUDA allocator. It will be removed after
// StreamSafeCUDAAllocator has been fully tested.
PADDLE_DEFINE_EXPORTED_bool(use_stream_safe_cuda_allocator, true,
                            "Enable StreamSafeCUDAAllocator");

PADDLE_DEFINE_EXPORTED_bool(use_cuda_managed_memory, false,
                            "Whether to use CUDAManagedAllocator to allocate "
                            "managed memory, only available for auto_growth "
                            "strategy");

DECLARE_string(allocator_strategy);
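
// For reference (illustrative): these flags are typically configured through
// environment variables before the process starts, e.g.
//   export FLAGS_allocator_strategy="auto_growth"
//   export FLAGS_use_cuda_managed_memory=true
//   export FLAGS_gpu_allocator_retry_time=10000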

namespace paddle {
namespace memory {
namespace allocation {

#ifdef PADDLE_WITH_CUDA
class CUDAGraphAllocator
    : public Allocator,
      public std::enable_shared_from_this<CUDAGraphAllocator> {
 private:
  class PrivateAllocation : public Allocation {
   public:
    PrivateAllocation(CUDAGraphAllocator* allocator,
                      DecoratedAllocationPtr underlying_allocation)
        : Allocation(
              underlying_allocation->ptr(), underlying_allocation->base_ptr(),
              underlying_allocation->size(), underlying_allocation->place()),
          allocator_(allocator->shared_from_this()),
          underlying_allocation_(std::move(underlying_allocation)) {}

   private:
    std::shared_ptr<Allocator> allocator_;
    DecoratedAllocationPtr underlying_allocation_;
  };

  explicit CUDAGraphAllocator(const std::shared_ptr<Allocator>& allocator)
      : underlying_allocator_(allocator) {}

 public:
  static std::shared_ptr<Allocator> Create(
      const std::shared_ptr<Allocator>& allocator) {
    return std::shared_ptr<Allocator>(new CUDAGraphAllocator(allocator));
  }

 protected:
  phi::Allocation* AllocateImpl(size_t size) {
    VLOG(10) << "Allocate " << size << " for CUDA Graph";
    return new PrivateAllocation(this,
                                 static_unique_ptr_cast<Allocation>(
                                     underlying_allocator_->Allocate(size)));
  }

  void FreeImpl(phi::Allocation* allocation) {
    VLOG(10) << "delete for CUDA Graph";
    delete allocation;
  }

 private:
  std::shared_ptr<Allocator> underlying_allocator_;
};
#endif

static bool IsCUDAGraphCapturing() {
#ifdef PADDLE_WITH_CUDA
  return UNLIKELY(platform::CUDAGraph::IsThisThreadCapturing());
#else
  return false;
#endif
}

class AllocatorFacadePrivate {
 public:
  using AllocatorMap = std::map<platform::Place, std::shared_ptr<Allocator>>;

#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
  using CUDAAllocatorMap =
      std::map<platform::CUDAPlace,
               std::map<gpuStream_t, std::shared_ptr<Allocator>>>;
#endif

  explicit AllocatorFacadePrivate(bool allow_free_idle_chunk = true) {
    strategy_ = GetAllocatorStrategy();
    is_stream_safe_cuda_allocator_used_ = false;

    switch (strategy_) {
      case AllocatorStrategy::kNaiveBestFit: {
        InitNaiveBestFitCPUAllocator();
#ifdef PADDLE_WITH_IPU
        for (int dev_id = 0; dev_id < platform::GetIPUDeviceCount(); ++dev_id) {
          InitNaiveBestFitIPUAllocator(platform::IPUPlace(dev_id));
        }
#endif
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
        for (int dev_id = 0; dev_id < platform::GetGPUDeviceCount(); ++dev_id) {
          InitNaiveBestFitCUDAAllocator(platform::CUDAPlace(dev_id));
        }
        InitNaiveBestFitCUDAPinnedAllocator();
#endif
#ifdef PADDLE_WITH_XPU
        for (int dev_id = 0; dev_id < platform::GetXPUDeviceCount(); ++dev_id) {
          InitNaiveBestFitXPUAllocator(platform::XPUPlace(dev_id));
        }
#endif
#ifdef PADDLE_WITH_ASCEND_CL
        for (int dev_id = 0; dev_id < platform::GetNPUDeviceCount(); ++dev_id) {
          InitNaiveBestFitNPUAllocator(platform::NPUPlace(dev_id));
        }
        InitNaiveBestFitNPUPinnedAllocator();
#endif
#ifdef PADDLE_WITH_MLU
        for (int dev_id = 0; dev_id < platform::GetMLUDeviceCount(); ++dev_id) {
          InitNaiveBestFitMLUAllocator(platform::MLUPlace(dev_id));
        }
#endif
#ifdef PADDLE_WITH_CUSTOM_DEVICE
        auto device_types = phi::DeviceManager::GetAllCustomDeviceTypes();
        for (const auto& dev_type : device_types) {
          for (size_t dev_id = 0;
               dev_id < phi::DeviceManager::GetDeviceCount(dev_type);
               ++dev_id) {
            InitNaiveBestFitCustomDeviceAllocator(
                platform::CustomPlace(dev_type, dev_id));
          }
        }
#endif
        break;
      }

      case AllocatorStrategy::kAutoGrowth: {
        InitNaiveBestFitCPUAllocator();
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
        allow_free_idle_chunk_ = allow_free_idle_chunk;
        for (int dev_id = 0; dev_id < platform::GetGPUDeviceCount(); ++dev_id) {
          InitAutoGrowthCUDAAllocator(platform::CUDAPlace(dev_id),
                                      allow_free_idle_chunk_);
        }

        // NOTE(Ruibiao): For the GPU multi-stream case without CUDA graph
        // capturing, the 'allocators_' map (place -> Allocator) holds the
        // StreamSafeCUDAAllocator related to the default stream (i.e., the
        // stream obtained directly from the DeviceContext), while the
        // 'cuda_allocators_' map (place -> map(stream -> Allocator)) holds
        // the StreamSafeCUDAAllocators related to non-default streams (i.e.,
        // the streams users pass in). The default-stream allocator is built
        // when AllocatorFacadePrivate is constructed, while the
        // non-default-stream allocators are built lazily in the GetAllocator
        // function with 'create_if_not_found = true'. We treat the default
        // stream specially for performance reasons: since most Alloc calls in
        // an application target the default stream, handling it separately
        // avoids much of the overhead of acquiring the default stream and
        // taking the read-write lock.
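        //
        // Illustrative lookup paths implied by this design (a sketch, not
        // extra code): for a CUDAPlace `place` and a user-passed `stream`,
        //   GetAllocator(place, /*size=*/1)
        //       -> allocators_[place]               (default-stream allocator)
        //   GetAllocator(place, stream, /*create_if_not_found=*/true)
        //       -> cuda_allocators_[place][stream]  (created lazily)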
        if (FLAGS_use_stream_safe_cuda_allocator) {
          if (LIKELY(!IsCUDAGraphCapturing())) {
            WrapStreamSafeCUDAAllocatorForDefault();
          }
          is_stream_safe_cuda_allocator_used_ = true;
        }

        InitNaiveBestFitCUDAPinnedAllocator();
#endif
#ifdef PADDLE_WITH_ASCEND_CL
        for (int dev_id = 0; dev_id < platform::GetNPUDeviceCount(); ++dev_id) {
          InitNaiveBestFitNPUAllocator(platform::NPUPlace(dev_id));
        }
        InitNaiveBestFitNPUPinnedAllocator();
#endif
#ifdef PADDLE_WITH_XPU
        for (int dev_id = 0; dev_id < platform::GetXPUDeviceCount(); ++dev_id) {
          InitNaiveBestFitXPUAllocator(platform::XPUPlace(dev_id));
        }
#endif
#ifdef PADDLE_WITH_IPU
        for (int dev_id = 0; dev_id < platform::GetIPUDeviceCount(); ++dev_id) {
          InitNaiveBestFitIPUAllocator(platform::IPUPlace(dev_id));
        }
#endif
#ifdef PADDLE_WITH_MLU
        for (int dev_id = 0; dev_id < platform::GetMLUDeviceCount(); ++dev_id) {
          InitNaiveBestFitMLUAllocator(platform::MLUPlace(dev_id));
        }
#endif
#ifdef PADDLE_WITH_CUSTOM_DEVICE
        auto device_types = phi::DeviceManager::GetAllCustomDeviceTypes();
        for (const auto& dev_type : device_types) {
          for (size_t dev_id = 0;
               dev_id < phi::DeviceManager::GetDeviceCount(dev_type);
               ++dev_id) {
            InitAutoGrowthCustomDeviceAllocator(
                platform::CustomPlace(dev_type, dev_id), allow_free_idle_chunk);
          }
        }
#endif
        break;
      }

      case AllocatorStrategy::kThreadLocal: {
        InitNaiveBestFitCPUAllocator();
#ifdef PADDLE_WITH_XPU
        for (int dev_id = 0; dev_id < platform::GetXPUDeviceCount(); ++dev_id) {
          InitNaiveBestFitXPUAllocator(platform::XPUPlace(dev_id));
        }
#endif
#ifdef PADDLE_WITH_IPU
        for (int dev_id = 0; dev_id < platform::GetIPUDeviceCount(); ++dev_id) {
          InitNaiveBestFitIPUAllocator(platform::IPUPlace(dev_id));
        }
#endif
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
        for (int dev_id = 0; dev_id < platform::GetGPUDeviceCount(); ++dev_id) {
          InitThreadLocalCUDAAllocator(platform::CUDAPlace(dev_id));
        }
        InitNaiveBestFitCUDAPinnedAllocator();
#endif
#ifdef PADDLE_WITH_MLU
        for (int dev_id = 0; dev_id < platform::GetMLUDeviceCount(); ++dev_id) {
          InitNaiveBestFitMLUAllocator(platform::MLUPlace(dev_id));
        }
#endif
        break;
      }

      default: {
        PADDLE_THROW(platform::errors::InvalidArgument(
            "Unsupported allocator strategy: %d", static_cast<int>(strategy_)));
      }
    }
    InitZeroSizeAllocators();
    InitSystemAllocators();

    if (FLAGS_gpu_allocator_retry_time > 0) {
      WrapCUDARetryAllocator(FLAGS_gpu_allocator_retry_time);
    }

    WrapStatAllocator();

    CheckAllocThreadSafe();

#ifdef PADDLE_WITH_CUDA
    // No need to wrap CUDAGraphAllocator for StreamSafeCUDAAllocator
    if (!is_stream_safe_cuda_allocator_used_ &&
        UNLIKELY(IsCUDAGraphCapturing())) {
      WrapCUDAGraphAllocator();
    }
#endif
  }

  inline const std::shared_ptr<Allocator>& GetAllocator(
      const platform::Place& place, size_t size) {
    VLOG(6) << "GetAllocator"
            << " " << place << " " << size;
    const auto& allocators =
        (size > 0 ? (UNLIKELY(FLAGS_use_system_allocator) ? system_allocators_
                                                          : GetAllocatorMap())
                  : zero_size_allocators_);
    auto iter = allocators.find(place);
    PADDLE_ENFORCE_NE(iter, allocators.end(),
                      platform::errors::NotFound(
                          "No allocator found for the place, %s", place));
    return iter->second;
  }
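
  // Usage sketch (illustrative): the size argument only selects which map is
  // consulted, so any positive value routes to the regular (or system)
  // allocators while 0 routes to the zero-size ones:
  //   GetAllocator(place, /*size=*/1)->Allocate(n);  // real allocation
  //   GetAllocator(place, /*size=*/0)->Allocate(0);  // zero-size allocation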

  void* GetBasePtr(const std::shared_ptr<phi::Allocation>& allocation) {
    return static_cast<Allocation*>(allocation.get())->base_ptr();
  }

  bool IsStreamSafeCUDAAllocatorUsed() {
    return is_stream_safe_cuda_allocator_used_ &&
           LIKELY(FLAGS_use_system_allocator == false);
  }

#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
  bool HasCUDAAllocator(const platform::CUDAPlace& place, gpuStream_t stream) {
    auto it = cuda_allocators_.find(place);
    if (it == cuda_allocators_.end()) {
      return false;
    }
    const std::map<gpuStream_t, std::shared_ptr<Allocator>>& allocator_map =
        it->second;
    return allocator_map.find(stream) != allocator_map.end();
  }

  const std::shared_ptr<Allocator>& GetAllocator(
      const platform::CUDAPlace& place, gpuStream_t stream,
      bool create_if_not_found = false) {
    if (LIKELY(!IsCUDAGraphCapturing())) {
      if (stream == GetDefaultStream(place)) {
        VLOG(7) << "Get Allocator by passing in a default stream";
        return GetAllocator(place, /* A non-zero num to choose allocator_ */ 1);
      }
    }

    /* shared_lock_guard */ {
      std::shared_lock<std::shared_timed_mutex> lock_guard(
          cuda_allocator_mutex_);
      if (LIKELY(HasCUDAAllocator(place, stream))) {
        return cuda_allocators_[place][stream];
      } else {
        PADDLE_ENFORCE_NE(create_if_not_found, false,
                          platform::errors::NotFound(
                              "No allocator found for stream %s in place %s "
                              "with create_if_not_found = false",
                              stream, place));
      }
    }

    /* unique_lock_guard */ {
      std::unique_lock<std::shared_timed_mutex> lock_guard(
          cuda_allocator_mutex_);
      InitStreamSafeCUDAAllocator(place, stream);
      return cuda_allocators_[place][stream];
    }
  }

  const std::shared_ptr<StreamSafeCUDAAllocator>
  GetDefaultStreamSafeCUDAAllocator(const platform::CUDAPlace& place) const {
    const auto iter = default_stream_safe_cuda_allocators_.find(place);
    PADDLE_ENFORCE_NE(
        iter, default_stream_safe_cuda_allocators_.end(),
        platform::errors::NotFound(
            "No StreamSafeCUDAAllocator found for the place, %s", place));
    return iter->second;
  }

  gpuStream_t GetDefaultStream(const platform::CUDAPlace& place) const {
    const std::shared_ptr<StreamSafeCUDAAllocator>& allocator =
        GetDefaultStreamSafeCUDAAllocator(place);
    return allocator->GetDefaultStream();
  }

  void SetDefaultStream(const platform::CUDAPlace& place, gpuStream_t stream) {
    const std::shared_ptr<StreamSafeCUDAAllocator>& allocator =
        GetDefaultStreamSafeCUDAAllocator(place);

    // NOTE(Ruibiao): The default stream is set when the CUDADeviceContext is
    // created. Normally, the DeviceContextPool is a global singleton and one
    // Place corresponds to one DeviceContext. However, to support
    // multi-stream scheduling, the standalone executor creates two extra
    // DeviceContextPools for the H2D and D2H streams in StreamAnalyzer, which
    // makes one Place correspond to multiple DeviceContexts and could
    // unexpectedly reset the default stream at runtime. To avoid this
    // behavior, we do not allow changing the default stream after it has been
    // initially set.
    if (allocator->GetDefaultStream() != nullptr) {
      VLOG(5) << "The default stream for StreamSafeCUDAAllocator("
              << allocator.get() << ") in " << place << " has already been "
              << "set to " << allocator->GetDefaultStream()
              << " and is not allowed to change now.";
      return;
    }

    allocator->SetDefaultStream(stream);
    VLOG(8) << "Set default stream to " << stream
            << " for StreamSafeCUDAAllocator(" << allocator.get() << ") in "
            << place;
  }
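
  // Sketch (illustrative): the first call wires up the default stream and
  // later calls are ignored:
  //   SetDefaultStream(place, stream1);  // takes effect
  //   SetDefaultStream(place, stream2);  // no-op, default stream already set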

  void RecordStream(std::shared_ptr<phi::Allocation> allocation,
                    gpuStream_t stream) {
    std::shared_ptr<StreamSafeCUDAAllocation> stream_safe_cuda_allocation =
        std::dynamic_pointer_cast<StreamSafeCUDAAllocation>(allocation);
    if (stream_safe_cuda_allocation != nullptr) {
      stream_safe_cuda_allocation->RecordStream(stream);
    } else {
      VLOG(6) << "RecordStream for a non-StreamSafeCUDAAllocation";
    }
  }
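
  // Typical use (illustrative): after launching a kernel on stream `s` that
  // reads or writes `allocation`, record `s` so the memory is not reused
  // until the stream's pending work completes:
  //   AllocatorFacade::Instance().RecordStream(allocation, s);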

  gpuStream_t GetStream(
      const std::shared_ptr<phi::Allocation>& allocation) const {
    const std::shared_ptr<StreamSafeCUDAAllocation>
        stream_safe_cuda_allocation =
            std::dynamic_pointer_cast<StreamSafeCUDAAllocation>(allocation);
    if (stream_safe_cuda_allocation != nullptr) {
      return stream_safe_cuda_allocation->GetOwningStream();
    }

    VLOG(6) << "GetStream for a non-StreamSafeCUDAAllocation";
    return static_cast<phi::GPUContext*>(
               platform::DeviceContextPool::Instance().Get(allocation->place()))
        ->stream();
  }
#endif

 private:
  class ZeroSizeAllocator : public Allocator {
   public:
    explicit ZeroSizeAllocator(platform::Place place) : place_(place) {}
    bool IsAllocThreadSafe() const override { return true; }

   protected:
    phi::Allocation* AllocateImpl(size_t size) override {
      return new Allocation(nullptr, 0, place_);
    }
    void FreeImpl(phi::Allocation* allocation) override { delete allocation; }

   private:
    platform::Place place_;
  };
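
  // Consequence (illustrative): a zero-size request still yields a valid
  // Allocation object, but one whose ptr() is nullptr and size() is 0:
  //   auto a = zero_size_allocators_[place]->Allocate(0);
  //   // a->ptr() == nullptr, a->size() == 0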

  const AllocatorMap& GetAllocatorMap() { return allocators_; }

  void InitNaiveBestFitCPUAllocator() {
    allocators_[platform::CPUPlace()] =
        std::make_shared<NaiveBestFitAllocator>(platform::CPUPlace());
  }

#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
  void InitNaiveBestFitCUDAPinnedAllocator() {
    allocators_[platform::CUDAPinnedPlace()] =
        std::make_shared<NaiveBestFitAllocator>(platform::CUDAPinnedPlace());
  }

  void InitNaiveBestFitCUDAAllocator(platform::CUDAPlace p) {
    allocators_[p] = std::make_shared<NaiveBestFitAllocator>(p);
  }

  // Create a new CUDAAllocator or CUDAManagedAllocator for the given device
  std::shared_ptr<Allocator> CreateCUDAAllocator(platform::CUDAPlace p) {
    if (FLAGS_use_cuda_managed_memory) {
      PADDLE_ENFORCE_EQ(
          strategy_, AllocatorStrategy::kAutoGrowth,
          platform::errors::InvalidArgument(
              "CUDA managed memory is only implemented for auto_growth "
              "strategy, not support %s strategy.\n"
              "Please use auto_growth strategy by command `export "
              "FLAGS_allocator_strategy=\"auto_growth\"`, or disable managed "
              "memory by command `export FLAGS_use_cuda_managed_memory=false`",
              FLAGS_allocator_strategy));

      if (!platform::IsGPUManagedMemorySupported(p.device)) {
        PADDLE_THROW(platform::errors::Unavailable(
            "Failed to create CUDAManagedAllocator on GPU %d.\n\n"
            "You have enabled CUDA managed memory, but the gpu device does not "
            "support allocating managed memory.\n"
            "If you don't actually need to use managed memory, please disable "
            "it with command `export FLAGS_use_cuda_managed_memory=false`.\n"
            "Or you must use the gpu device that supports managed memory.",
            p.device));
      }
      return std::make_shared<CUDAManagedAllocator>(p);
    }
    return std::make_shared<CUDAAllocator>(p);
  }

  void InitStreamSafeCUDAAllocator(platform::CUDAPlace p, gpuStream_t stream) {
    PADDLE_ENFORCE_EQ(
        strategy_, AllocatorStrategy::kAutoGrowth,
        platform::errors::Unimplemented(
            "StreamSafeCUDAAllocator only supports the auto-growth strategy; "
            "allocator strategy %d is unsupported for multi-stream",
            static_cast<int>(strategy_)));
    if (LIKELY(!HasCUDAAllocator(p, stream))) {
      VLOG(8) << "Init CUDA allocator for stream " << stream << " in place "
              << p;
      InitAutoGrowthCUDAAllocator(p, stream);
      WrapStreamSafeCUDAAllocator(p, stream);
      WrapCUDARetryAllocator(p, stream, FLAGS_gpu_allocator_retry_time);
      WrapStatAllocator(p, stream);
    }
  }

  void InitAutoGrowthCUDAAllocator(platform::CUDAPlace p, gpuStream_t stream) {
#if defined(PADDLE_WITH_HIP)
    auto cuda_allocator = CreateCUDAAllocator(p);
    cuda_allocators_[p][stream] = std::make_shared<AutoGrowthBestFitAllocator>(
        cuda_allocator, platform::GpuMinChunkSize(), 0, allow_free_idle_chunk_);
#endif

#if defined(PADDLE_WITH_CUDA)
#if CUDA_VERSION >= 10020
    CUdevice device;
    int val;
    try {
      PADDLE_ENFORCE_GPU_SUCCESS(
          paddle::platform::dynload::cuDeviceGet(&device, p.GetDeviceId()));

      PADDLE_ENFORCE_GPU_SUCCESS(
          paddle::platform::dynload::cuDeviceGetAttribute(
              &val, CU_DEVICE_ATTRIBUTE_VIRTUAL_ADDRESS_MANAGEMENT_SUPPORTED,
              device));
    } catch (...) {
      val = 0;
    }

    if (val > 0 && FLAGS_use_virtual_memory_auto_growth) {
      auto cuda_allocator = std::make_shared<CUDAVirtualMemAllocator>(p);
      cuda_allocators_[p][stream] =
          std::make_shared<VirtualMemoryAutoGrowthBestFitAllocator>(
              cuda_allocator, platform::GpuMinChunkSize(), p);
    } else {
      auto cuda_allocator = CreateCUDAAllocator(p);
      cuda_allocators_[p][stream] =
          std::make_shared<AutoGrowthBestFitAllocator>(
              cuda_allocator, platform::GpuMinChunkSize(),
              allow_free_idle_chunk_);
    }
#else
    auto cuda_allocator = CreateCUDAAllocator(p);
    auto alignment = platform::GpuMinChunkSize();
    bool need_addr_align = true;
    // NOTE: Since the CUDA runtime cannot survive a fork(), calling any CUDA
    // API in a forked subprocess may return CUDA error(3), i.e.,
    // cudaErrorInitializationError, even though the CUDAAllocator is only
    // initialized and never actually used there.
    // The try-catch block handles the case where GetDeviceProperties() may
    // fail in multi-process scenarios (for example, in a dataloader with
    // num_workers > 0).
    try {
      const auto& prop = platform::GetDeviceProperties(p.GetDeviceId());
      need_addr_align = prop.textureAlignment < alignment;
      VLOG(4) << "GetDeviceProperties ok, textureAlignment: "
              << prop.textureAlignment
              << ", set need_addr_align=" << need_addr_align;
    } catch (...) {
      need_addr_align = true;
      VLOG(4) << "GetDeviceProperties failed, set need_addr_align=true";
    }
    // Otherwise, the address returned by the CUDA allocator is already
    // aligned, ref:
    // https://stackoverflow.com/questions/14082964/cuda-alignment-256bytes-seriously/14083295#14083295
    std::shared_ptr<Allocator> underlying_allocator{nullptr};
    if (need_addr_align) {
      VLOG(10) << "use AlignedAllocator with alignment: " << alignment;
      underlying_allocator =
          std::make_shared<AlignedAllocator>(cuda_allocator, alignment);
    } else {
      VLOG(10) << "not use AlignedAllocator with alignment: " << alignment;
      underlying_allocator = cuda_allocator;
    }

    cuda_allocators_[p][stream] = std::make_shared<AutoGrowthBestFitAllocator>(
        underlying_allocator, alignment, 0, allow_free_idle_chunk_);
#endif
#endif
  }

  // NOTE(Ruibiao): Old single-stream version, will be removed later
  void InitAutoGrowthCUDAAllocator(platform::CUDAPlace p,
                                   bool allow_free_idle_chunk) {
#if defined(PADDLE_WITH_HIP)
    auto cuda_allocator = CreateCUDAAllocator(p);
    allocators_[p] = std::make_shared<AutoGrowthBestFitAllocator>(
        cuda_allocator, platform::GpuMinChunkSize(), allow_free_idle_chunk);
#endif

#if defined(PADDLE_WITH_CUDA)
#if CUDA_VERSION >= 10020
    CUdevice device;
    int val;
    try {
      PADDLE_ENFORCE_GPU_SUCCESS(
          paddle::platform::dynload::cuDeviceGet(&device, p.GetDeviceId()));

      PADDLE_ENFORCE_GPU_SUCCESS(
          paddle::platform::dynload::cuDeviceGetAttribute(
              &val, CU_DEVICE_ATTRIBUTE_VIRTUAL_ADDRESS_MANAGEMENT_SUPPORTED,
              device));
    } catch (...) {
      val = 0;
    }

    if (val > 0 && FLAGS_use_virtual_memory_auto_growth) {
      auto cuda_allocator = std::make_shared<CUDAVirtualMemAllocator>(p);
      allocators_[p] =
          std::make_shared<VirtualMemoryAutoGrowthBestFitAllocator>(
              cuda_allocator, platform::GpuMinChunkSize(), p);
    } else {
      auto cuda_allocator = CreateCUDAAllocator(p);
      allocators_[p] = std::make_shared<AutoGrowthBestFitAllocator>(
          cuda_allocator, platform::GpuMinChunkSize(), allow_free_idle_chunk);
    }

#else
    auto cuda_allocator = CreateCUDAAllocator(p);
    auto alignment = platform::GpuMinChunkSize();
    bool need_addr_align = true;
    // NOTE: Since the CUDA runtime cannot survive a fork(), calling any CUDA
    // API in a forked subprocess may return CUDA error(3), i.e.,
    // cudaErrorInitializationError, even though the CUDAAllocator is only
    // initialized and never actually used there.
    // The try-catch block handles the case where GetDeviceProperties() may
    // fail in multi-process scenarios (for example, in a dataloader with
    // num_workers > 0).
    try {
      const auto& prop = platform::GetDeviceProperties(p.GetDeviceId());
      need_addr_align = prop.textureAlignment < alignment;
      VLOG(4) << "GetDeviceProperties ok, textureAlignment: "
              << prop.textureAlignment
              << ", set need_addr_align=" << need_addr_align;
    } catch (...) {
      need_addr_align = true;
      VLOG(4) << "GetDeviceProperties failed, set need_addr_align=true";
    }
    // Otherwise, the address returned by the CUDA allocator is already
    // aligned, ref:
    // https://stackoverflow.com/questions/14082964/cuda-alignment-256bytes-seriously/14083295#14083295
    std::shared_ptr<Allocator> underlying_allocator{nullptr};
    if (need_addr_align) {
      VLOG(10) << "use AlignedAllocator with alignment: " << alignment;
      underlying_allocator =
          std::make_shared<AlignedAllocator>(cuda_allocator, alignment);
    } else {
      VLOG(10) << "not use AlignedAllocator with alignment: " << alignment;
      underlying_allocator = cuda_allocator;
    }
    allocators_[p] = std::make_shared<AutoGrowthBestFitAllocator>(
        underlying_allocator, alignment, 0, allow_free_idle_chunk);
#endif
#endif
  }

  void InitThreadLocalCUDAAllocator(platform::CUDAPlace p) {
    allocators_[p] = std::make_shared<ThreadLocalCUDAAllocator>(p);
  }

  void WrapStreamSafeCUDAAllocator(platform::CUDAPlace p, gpuStream_t stream) {
    std::shared_ptr<Allocator>& allocator = cuda_allocators_[p][stream];
    allocator = std::make_shared<StreamSafeCUDAAllocator>(
        allocator, p, stream,
        /* in_cuda_graph_capturing = */ !allow_free_idle_chunk_);
  }

  void WrapStreamSafeCUDAAllocatorForDefault() {
    for (auto& pair : allocators_) {
      auto& place = pair.first;
      if (platform::is_gpu_place(place)) {
        std::shared_ptr<StreamSafeCUDAAllocator>&& allocator =
            std::make_shared<StreamSafeCUDAAllocator>(
                pair.second, place, /* default_stream = */ nullptr,
                /* in_cuda_graph_capturing = */ !allow_free_idle_chunk_);
        pair.second = allocator;

        // NOTE(Ruibiao): A tricky implementation that gives
        // StreamSafeCUDAAllocator the ability to interact with the outside
        // world, i.e., to have its default stream changed from outside
        default_stream_safe_cuda_allocators_[place] = allocator;
        VLOG(8) << "WrapStreamSafeCUDAAllocator for " << place
                << ", allocator address = " << pair.second.get();
      }
    }
  }

  void WrapCUDARetryAllocator(platform::CUDAPlace p, gpuStream_t stream,
                              size_t retry_time) {
    PADDLE_ENFORCE_GT(
        retry_time, 0,
        platform::errors::InvalidArgument(
            "Retry time should be larger than 0, but got %d", retry_time));
    std::shared_ptr<Allocator>& allocator = cuda_allocators_[p][stream];
    allocator = std::make_shared<RetryAllocator>(allocator, retry_time);
  }

  void WrapStatAllocator(platform::CUDAPlace p, gpuStream_t stream) {
    std::shared_ptr<Allocator>& allocator = cuda_allocators_[p][stream];
    allocator = std::make_shared<StatAllocator>(allocator);
  }

#ifdef PADDLE_WITH_CUDA
  void WrapCUDAGraphAllocator() {
    for (auto& item : allocators_) {
      auto& allocator = item.second;
      allocator = CUDAGraphAllocator::Create(allocator);
    }
  }
#endif

  static void CheckCUDAAllocThreadSafe(const CUDAAllocatorMap& allocators) {
    for (auto& place_pair : allocators) {
      for (auto& stream_pair : place_pair.second) {
        PADDLE_ENFORCE_EQ(stream_pair.second->IsAllocThreadSafe(), true,
                          platform::errors::InvalidArgument(
                              "Public allocators must be thread safe"));
      }
    }
  }
#endif

#ifdef PADDLE_WITH_XPU
  void InitNaiveBestFitXPUAllocator(platform::XPUPlace p) {
    allocators_[p] = std::make_shared<NaiveBestFitAllocator>(p);
  }
#endif

#ifdef PADDLE_WITH_IPU
  void InitNaiveBestFitIPUAllocator(platform::IPUPlace p) {
    allocators_[p] = std::make_shared<NaiveBestFitAllocator>(p);
  }
#endif

#ifdef PADDLE_WITH_MLU
  void InitNaiveBestFitMLUAllocator(platform::MLUPlace p) {
    allocators_[p] = std::make_shared<NaiveBestFitAllocator>(p);
  }
#endif

#ifdef PADDLE_WITH_ASCEND_CL
  void InitNaiveBestFitNPUAllocator(platform::NPUPlace p) {
    allocators_[p] = std::make_shared<NaiveBestFitAllocator>(p);
  }

  void InitNaiveBestFitNPUPinnedAllocator() {
    allocators_[platform::NPUPinnedPlace()] =
        std::make_shared<paddle::memory::allocation::NPUPinnedAllocator>();
  }
#endif

#ifdef PADDLE_WITH_CUSTOM_DEVICE
  void InitNaiveBestFitCustomDeviceAllocator(platform::CustomPlace p) {
    allocators_[p] = std::make_shared<NaiveBestFitAllocator>(p);
  }

  void InitAutoGrowthCustomDeviceAllocator(platform::CustomPlace p,
                                           bool allow_free_idle_chunk) {
    auto custom_allocator =
        std::make_shared<paddle::memory::allocation::CustomAllocator>(p);
    allocators_[p] = std::make_shared<AutoGrowthBestFitAllocator>(
        custom_allocator, phi::DeviceManager::GetMinChunkSize(p),
        allow_free_idle_chunk);
  }
#endif

  void InitSystemAllocators() {
    if (!system_allocators_.empty()) return;
    system_allocators_[platform::CPUPlace()] = std::make_shared<CPUAllocator>();
#ifdef PADDLE_WITH_XPU
    int device_count = platform::GetXPUDeviceCount();
    for (int i = 0; i < device_count; ++i) {
      platform::XPUPlace p(i);
      system_allocators_[p] = std::make_shared<NaiveBestFitAllocator>(p);
    }
#endif
#ifdef PADDLE_WITH_IPU
    int device_count = platform::GetIPUDeviceCount();
    for (int i = 0; i < device_count; ++i) {
      platform::IPUPlace p(i);
      system_allocators_[p] = std::make_shared<NaiveBestFitAllocator>(p);
    }
#endif
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
    system_allocators_[platform::CUDAPinnedPlace()] =
        std::make_shared<CPUPinnedAllocator>();
    int device_count = platform::GetGPUDeviceCount();
    for (int i = 0; i < device_count; ++i) {
      platform::CUDAPlace p(i);
      system_allocators_[p] = CreateCUDAAllocator(p);
    }
#endif
#ifdef PADDLE_WITH_MLU
    int device_count = platform::GetMLUDeviceCount();
    for (int i = 0; i < device_count; ++i) {
      platform::MLUPlace p(i);
      system_allocators_[p] = std::make_shared<NaiveBestFitAllocator>(p);
    }
#endif
  }

  void InitZeroSizeAllocators() {
    if (!zero_size_allocators_.empty()) return;
    std::vector<platform::Place> places;
    places.emplace_back(platform::CPUPlace());
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
    int device_count = platform::GetGPUDeviceCount();
    for (int dev_id = 0; dev_id < device_count; ++dev_id) {
      places.emplace_back(platform::CUDAPlace(dev_id));
    }
    places.emplace_back(platform::CUDAPinnedPlace());
#endif
#ifdef PADDLE_WITH_XPU
    int device_count = platform::GetXPUDeviceCount();
    for (int dev_id = 0; dev_id < device_count; ++dev_id) {
      places.emplace_back(platform::XPUPlace(dev_id));
    }
#endif
#ifdef PADDLE_WITH_ASCEND_CL
    int device_count = platform::GetNPUDeviceCount();
    for (int dev_id = 0; dev_id < device_count; ++dev_id) {
      places.emplace_back(platform::NPUPlace(dev_id));
    }
#endif
#ifdef PADDLE_WITH_IPU
    int device_count = platform::GetIPUDeviceCount();
    for (int dev_id = 0; dev_id < device_count; ++dev_id) {
      places.emplace_back(platform::IPUPlace(dev_id));
    }
#endif
#ifdef PADDLE_WITH_MLU
    int device_count = platform::GetMLUDeviceCount();
    for (int dev_id = 0; dev_id < device_count; ++dev_id) {
      places.emplace_back(platform::MLUPlace(dev_id));
    }
#endif
#ifdef PADDLE_WITH_CUSTOM_DEVICE
    auto device_types = phi::DeviceManager::GetAllCustomDeviceTypes();
    for (const auto& dev_type : device_types) {
      for (size_t dev_id = 0;
           dev_id < phi::DeviceManager::GetDeviceCount(dev_type); dev_id++) {
        places.emplace_back(platform::CustomPlace(dev_type, dev_id));
      }
    }
#endif

    for (auto& p : places) {
      zero_size_allocators_[p] = std::make_shared<ZeroSizeAllocator>(p);
    }
  }

  static void CheckAllocThreadSafe(const AllocatorMap& allocators) {
    for (auto& pair : allocators) {
      PADDLE_ENFORCE_EQ(pair.second->IsAllocThreadSafe(), true,
                        platform::errors::InvalidArgument(
                            "Public allocators must be thread safe"));
    }
  }

  void CheckAllocThreadSafe() const {
    CheckAllocThreadSafe(allocators_);
    CheckAllocThreadSafe(zero_size_allocators_);
    CheckAllocThreadSafe(system_allocators_);
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
    if (is_stream_safe_cuda_allocator_used_) {
      CheckCUDAAllocThreadSafe(cuda_allocators_);
    }
#endif
  }

  void WrapCUDARetryAllocator(size_t retry_time) {
    PADDLE_ENFORCE_GT(
        retry_time, 0,
        platform::errors::InvalidArgument(
            "Retry time should be larger than 0, but got %d", retry_time));
    for (auto& pair : allocators_) {
      if (platform::is_gpu_place(pair.first)) {
        pair.second = std::make_shared<RetryAllocator>(pair.second, retry_time);
      }
    }
  }

  void WrapStatAllocator() {
    for (auto& pair : allocators_) {
      // Currently, memory stats are only supported for GPU
      if (platform::is_gpu_place(pair.first)) {
        pair.second = std::make_shared<StatAllocator>(pair.second);
      }
    }
  }

#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
  // Standalone CUDA allocators to support multi-stream GC in the new executor
  std::map<platform::Place, std::shared_ptr<StreamSafeCUDAAllocator>>
      default_stream_safe_cuda_allocators_;
  CUDAAllocatorMap cuda_allocators_;
  std::shared_timed_mutex cuda_allocator_mutex_;
#endif
  AllocatorStrategy strategy_;
  AllocatorMap allocators_;
  static AllocatorMap zero_size_allocators_;
  static AllocatorMap system_allocators_;
  bool allow_free_idle_chunk_;
  bool is_stream_safe_cuda_allocator_used_;
};
AllocatorFacadePrivate::AllocatorMap
    AllocatorFacadePrivate::zero_size_allocators_;
AllocatorFacadePrivate::AllocatorMap AllocatorFacadePrivate::system_allocators_;

// Pimpl. Make interface clean.
AllocatorFacade::AllocatorFacade() : m_(new AllocatorFacadePrivate()) {}
// Deleting m_ may cause a core dump when the Python destructor conflicts with
// the C++ one.
AllocatorFacade::~AllocatorFacade() {}

AllocatorFacade& AllocatorFacade::Instance() {
  static AllocatorFacade* instance = new AllocatorFacade;
  return *instance;
}
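
// Typical entry point (an illustrative sketch): allocate 1 KB on the CPU
// through the facade singleton.
//   AllocationPtr ptr =
//       AllocatorFacade::Instance().Alloc(platform::CPUPlace(), 1024);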

AllocatorFacadePrivate* AllocatorFacade::GetPrivate() const {
#ifdef PADDLE_WITH_CUDA
  if (UNLIKELY(IsCUDAGraphCapturing())) {
    auto id = platform::CUDAGraph::CapturingID();
    auto iter = cuda_graph_map_.find(id);
    PADDLE_ENFORCE_NE(
        iter, cuda_graph_map_.end(),
        platform::errors::PermissionDenied(
            "No memory pool is prepared for CUDA Graph capturing."));
    VLOG(10) << "Choose CUDA Graph memory pool";
    return iter->second.get();
  }
#endif
  return m_;
}

const std::shared_ptr<Allocator>& AllocatorFacade::GetAllocator(
    const platform::Place& place) {
  return GetPrivate()->GetAllocator(
      place, /* A non-zero num to choose allocator_ */ 1);
}

void* AllocatorFacade::GetBasePtr(
    const std::shared_ptr<phi::Allocation>& allocation) {
  PADDLE_ENFORCE_EQ(GetAllocatorStrategy(), AllocatorStrategy::kAutoGrowth,
                    paddle::platform::errors::Unimplemented(
                        "GetBasePtr() is only implemented for the auto_growth "
                        "strategy, not for allocator strategy: %d",
                        static_cast<int>(GetAllocatorStrategy())));
  PADDLE_ENFORCE_EQ(platform::is_gpu_place(allocation->place()), true,
                    paddle::platform::errors::Unimplemented(
                        "GetBasePtr() is only implemented for CUDAPlace(), "
                        "not for place: %s",
                        allocation->place()));
  return GetPrivate()->GetBasePtr(allocation);
}

const std::shared_ptr<Allocator>& AllocatorFacade::GetZeroAllocator(
    const platform::Place& place) {
  return GetPrivate()->GetAllocator(place, /* zero size */ 0);
}

std::shared_ptr<phi::Allocation> AllocatorFacade::AllocShared(
    const platform::Place& place, size_t size) {
  return std::shared_ptr<phi::Allocation>(Alloc(place, size));
}

AllocationPtr AllocatorFacade::Alloc(const platform::Place& place,
                                     size_t size) {
  return GetPrivate()->GetAllocator(place, size)->Allocate(size);
}

uint64_t AllocatorFacade::Release(const platform::Place& place) {
  return GetPrivate()
      ->GetAllocator(place, /* A non-zero num to choose allocator_ */ 1)
      ->Release(place);
}

std::shared_ptr<phi::Allocation> AllocatorFacade::AllocShared(
    const platform::Place& place, size_t size, const phi::Stream& stream) {
  return std::shared_ptr<phi::Allocation>(Alloc(place, size, stream));
}

AllocationPtr AllocatorFacade::Alloc(const platform::Place& place, size_t size,
                                     const phi::Stream& stream) {
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
  AllocatorFacadePrivate* m = GetPrivate();
  if (!m->IsStreamSafeCUDAAllocatorUsed()) {
    VLOG(6) << "Warning: StreamSafeCUDAAllocator is not used!";
    return Alloc(place, size);
  }

  platform::CUDAPlace p(place.GetDeviceId());
  if (LIKELY(size > 0 && FLAGS_use_system_allocator == false)) {
    gpuStream_t s = reinterpret_cast<gpuStream_t>(stream.id());
    return m->GetAllocator(p, s, /* create_if_not_found = */ true)
        ->Allocate(size);
  } else {
    return m->GetAllocator(p, size)->Allocate(size);
  }
#else
  PADDLE_THROW(platform::errors::PreconditionNotMet("Not compiled with GPU."));
#endif
}
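
// Stream-aware allocation sketch (illustrative; assumes a valid gpuStream_t
// `stream`, wrapped into a phi::Stream the same way the reinterpret_cast
// above unwraps it):
//   phi::Stream s(reinterpret_cast<phi::StreamId>(stream));
//   auto allocation = AllocatorFacade::Instance().AllocShared(
//       platform::CUDAPlace(0), /*size=*/1024, s);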

bool AllocatorFacade::InSameStream(
    const std::shared_ptr<phi::Allocation>& allocation,
    const phi::Stream& stream) {
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
  gpuStream_t s = reinterpret_cast<gpuStream_t>(stream.id());
  return s == GetStream(allocation);
#else
  PADDLE_THROW(platform::errors::PreconditionNotMet("Not compiled with GPU."));
#endif
}

bool AllocatorFacade::IsStreamSafeCUDAAllocatorUsed() {
  return GetPrivate()->IsStreamSafeCUDAAllocatorUsed();
}

#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
uint64_t AllocatorFacade::Release(const platform::CUDAPlace& place,
                                  gpuStream_t stream) {
  AllocatorFacadePrivate* m = GetPrivate();
  if (!m->IsStreamSafeCUDAAllocatorUsed()) {
    VLOG(6) << "Warning: StreamSafeCUDAAllocator is not used!";
    return Release(place);
  }

  return m->GetAllocator(place, stream)->Release(place);
}

void AllocatorFacade::RecordStream(std::shared_ptr<phi::Allocation> allocation,
                                   gpuStream_t stream) {
  GetPrivate()->RecordStream(allocation, stream);
}

const std::shared_ptr<Allocator>& AllocatorFacade::GetAllocator(
    const platform::Place& place, gpuStream_t stream) {
  AllocatorFacadePrivate* m = GetPrivate();

  if (!m->IsStreamSafeCUDAAllocatorUsed()) {
    VLOG(6) << "Warning: StreamSafeCUDAAllocator is not used!";
    return GetAllocator(place);
  }

  if (platform::is_gpu_place(place) && FLAGS_use_system_allocator == false) {
    return m->GetAllocator(place, stream,
                           /*create_if_not_found=*/true);
  }
  return m->GetAllocator(place, /* A non-zero num to choose allocator_ */ 1);
}

gpuStream_t AllocatorFacade::GetStream(
    const std::shared_ptr<phi::Allocation>& allocation) const {
  return GetPrivate()->GetStream(allocation);
}

void AllocatorFacade::SetDefaultStream(const platform::CUDAPlace& place,
                                       gpuStream_t stream) {
  if (m_->IsStreamSafeCUDAAllocatorUsed()) {
    m_->SetDefaultStream(place, stream);
  }
}

#ifdef PADDLE_WITH_CUDA
void AllocatorFacade::PrepareMemoryPoolForCUDAGraph(CUDAGraphID id) {
  PADDLE_ENFORCE_EQ(GetAllocatorStrategy(), AllocatorStrategy::kAutoGrowth,
                    platform::errors::InvalidArgument(
                        "CUDA Graph is only supported when the "
                        "FLAGS_allocator_strategy=\"auto_growth\", but got "
                        "FLAGS_allocator_strategy=\"%s\"",
                        FLAGS_allocator_strategy));
  auto& allocator = cuda_graph_map_[id];
  PADDLE_ENFORCE_EQ(
      allocator.get(), nullptr,
      platform::errors::InvalidArgument(
          "The memory pool of the CUDA Graph with ID %d have been prepared.",
          id));
  allocator.reset(new AllocatorFacadePrivate(/*allow_free_idle_chunk=*/false));

  VLOG(10) << "Prepare memory pool for CUDA Graph with ID " << id;
}

void AllocatorFacade::RemoveMemoryPoolOfCUDAGraph(CUDAGraphID id) {
  auto iter = cuda_graph_map_.find(id);
  PADDLE_ENFORCE_NE(iter, cuda_graph_map_.end(),
                    platform::errors::InvalidArgument(
                        "Cannot find CUDA Graph with ID = %d", id));
  cuda_graph_map_.erase(iter);
  VLOG(10) << "Remove memory pool of CUDA Graph with ID " << id;
}
#endif
#endif
}  // namespace allocation
}  // namespace memory
}  // namespace paddle