// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/fluid/memory/allocation/allocator_facade.h"

#include "gflags/gflags.h"
#include "paddle/fluid/memory/allocation/aligned_allocator.h"
#include "paddle/fluid/memory/allocation/allocator.h"
#include "paddle/fluid/memory/allocation/allocator_strategy.h"
#include "paddle/fluid/memory/allocation/auto_growth_best_fit_allocator.h"
#include "paddle/fluid/memory/allocation/cpu_allocator.h"
#include "paddle/fluid/memory/allocation/naive_best_fit_allocator.h"
#include "paddle/fluid/memory/allocation/retry_allocator.h"
#include "paddle/fluid/memory/allocation/stat_allocator.h"
#include "paddle/fluid/platform/enforce.h"
#include "paddle/fluid/platform/place.h"
#include "paddle/phi/core/macros.h"

#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
#include <shared_mutex>

#include "paddle/fluid/memory/allocation/cuda_allocator.h"
#include "paddle/fluid/memory/allocation/cuda_managed_allocator.h"
#include "paddle/fluid/memory/allocation/pinned_allocator.h"
#include "paddle/fluid/memory/allocation/stream_safe_cuda_allocator.h"
#include "paddle/fluid/memory/allocation/thread_local_allocator.h"
#include "paddle/fluid/platform/device/gpu/gpu_info.h"
#include "paddle/fluid/platform/device_context.h"
#include "paddle/phi/backends/gpu/gpu_context.h"

#ifdef PADDLE_WITH_CUDA
#include "paddle/phi/backends/gpu/cuda/cuda_graph.h"
#endif

#if CUDA_VERSION >= 10020
#include "paddle/fluid/memory/allocation/cuda_virtual_mem_allocator.h"
#include "paddle/fluid/memory/allocation/virtual_memory_auto_growth_best_fit_allocator.h"
#include "paddle/fluid/platform/dynload/cuda_driver.h"
#endif
#endif

#ifdef PADDLE_WITH_XPU
#include "paddle/fluid/platform/device/xpu/xpu_info.h"
#endif

#ifdef PADDLE_WITH_ASCEND_CL
#include "paddle/fluid/memory/allocation/npu_pinned_allocator.h"
#endif

#ifdef PADDLE_WITH_IPU
#include "paddle/fluid/platform/device/ipu/ipu_info.h"
#endif

#ifdef PADDLE_WITH_MLU
#include "paddle/fluid/platform/device/mlu/mlu_info.h"
#endif

#ifdef PADDLE_WITH_CUSTOM_DEVICE
#include "paddle/fluid/memory/allocation/custom_allocator.h"
#include "paddle/fluid/platform/device/device_wrapper.h"
#endif

PADDLE_DEFINE_EXPORTED_int64(
    gpu_allocator_retry_time,
    10000,
    "The retry time (milliseconds) when the allocator fails "
    "to allocate memory. No retry if this value is not greater than 0.");

PADDLE_DEFINE_EXPORTED_bool(
    use_system_allocator,
    false,
    "Whether to use the system allocator to allocate CPU and GPU memory. "
    "Only used for unittests.");

PADDLE_DEFINE_EXPORTED_bool(use_virtual_memory_auto_growth,
                            false,
                            "Use VirtualMemoryAutoGrowthBestFitAllocator.");

// NOTE(Ruibiao): This flag exists only for compatibility with the old
// single-stream CUDA allocator. It will be removed after
// StreamSafeCUDAAllocator has been fully tested.
PADDLE_DEFINE_EXPORTED_bool(use_stream_safe_cuda_allocator,
                            true,
                            "Enable StreamSafeCUDAAllocator");

PADDLE_DEFINE_EXPORTED_bool(use_cuda_managed_memory,
                            false,
                            "Whether to use CUDAManagedAllocator to allocate "
                            "managed memory; only available for the "
                            "auto_growth strategy");

DECLARE_string(allocator_strategy);
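
// Example of configuring these flags from the environment before launching a
// program (a hedged sketch; the flag names come from the definitions above,
// the values are illustrative):
//
//   export FLAGS_allocator_strategy="auto_growth"
//   export FLAGS_gpu_allocator_retry_time=10000
//   export FLAGS_use_stream_safe_cuda_allocator=true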

namespace paddle {
namespace memory {
namespace allocation {

#ifdef PADDLE_WITH_CUDA
class CUDAGraphAllocator
    : public Allocator,
      public std::enable_shared_from_this<CUDAGraphAllocator> {
 private:
  class PrivateAllocation : public Allocation {
   public:
    PrivateAllocation(CUDAGraphAllocator* allocator,
                      DecoratedAllocationPtr underlying_allocation)
        : Allocation(underlying_allocation->ptr(),
                     underlying_allocation->base_ptr(),
                     underlying_allocation->size(),
                     underlying_allocation->place()),
          allocator_(allocator->shared_from_this()),
          underlying_allocation_(std::move(underlying_allocation)) {}

   private:
    std::shared_ptr<Allocator> allocator_;
    DecoratedAllocationPtr underlying_allocation_;
  };

  explicit CUDAGraphAllocator(const std::shared_ptr<Allocator>& allocator)
      : underlying_allocator_(allocator) {}

 public:
  ~CUDAGraphAllocator() { VLOG(10) << "CUDAGraphAllocator destructed"; }

  static std::shared_ptr<Allocator> Create(
      const std::shared_ptr<Allocator>& allocator) {
    return std::shared_ptr<Allocator>(new CUDAGraphAllocator(allocator));
  }

 protected:
  phi::Allocation* AllocateImpl(size_t size) {
    VLOG(10) << "Allocate " << size << " for CUDA Graph";
    return new PrivateAllocation(this,
                                 static_unique_ptr_cast<Allocation>(
                                     underlying_allocator_->Allocate(size)));
  }

  void FreeImpl(phi::Allocation* allocation) {
    VLOG(10) << "delete for CUDA Graph";
    delete allocation;
  }

 private:
  std::shared_ptr<Allocator> underlying_allocator_;
};
#endif
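
// Design note: each PrivateAllocation holds a shared_ptr back to its
// CUDAGraphAllocator (via shared_from_this), so the allocator and the
// underlying memory pool it wraps stay alive until every allocation made
// during capture has been freed.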

static bool IsCUDAGraphCapturing() {
#ifdef PADDLE_WITH_CUDA
  return UNLIKELY(phi::backends::gpu::CUDAGraph::IsThisThreadCapturing());
#else
  return false;
#endif
}

class AllocatorFacadePrivate {
 public:
  using AllocatorMap = std::map<platform::Place, std::shared_ptr<Allocator>>;

#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
  using CUDAAllocatorMap =
      std::map<platform::CUDAPlace,
               std::map<gpuStream_t, std::shared_ptr<Allocator>>>;
#endif

  explicit AllocatorFacadePrivate(bool allow_free_idle_chunk = true) {
    strategy_ = GetAllocatorStrategy();
    is_stream_safe_cuda_allocator_used_ = false;

    switch (strategy_) {
      case AllocatorStrategy::kNaiveBestFit: {
        InitNaiveBestFitCPUAllocator();
#ifdef PADDLE_WITH_IPU
        for (int dev_id = 0; dev_id < platform::GetIPUDeviceCount(); ++dev_id) {
          InitNaiveBestFitIPUAllocator(platform::IPUPlace(dev_id));
        }
#endif
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
        for (int dev_id = 0; dev_id < platform::GetGPUDeviceCount(); ++dev_id) {
          InitNaiveBestFitCUDAAllocator(platform::CUDAPlace(dev_id));
        }
        InitNaiveBestFitCUDAPinnedAllocator();
#endif
#ifdef PADDLE_WITH_XPU
        for (int dev_id = 0; dev_id < platform::GetXPUDeviceCount(); ++dev_id) {
          InitNaiveBestFitXPUAllocator(platform::XPUPlace(dev_id));
        }
#endif
#ifdef PADDLE_WITH_ASCEND_CL
        for (int dev_id = 0; dev_id < platform::GetNPUDeviceCount(); ++dev_id) {
          InitNaiveBestFitNPUAllocator(platform::NPUPlace(dev_id));
        }
        InitNaiveBestFitNPUPinnedAllocator();
#endif
#ifdef PADDLE_WITH_MLU
        for (int dev_id = 0; dev_id < platform::GetMLUDeviceCount(); ++dev_id) {
          InitNaiveBestFitMLUAllocator(platform::MLUPlace(dev_id));
        }
#endif
#ifdef PADDLE_WITH_CUSTOM_DEVICE
        auto device_types = phi::DeviceManager::GetAllCustomDeviceTypes();
        for (const auto& dev_type : device_types) {
          for (size_t dev_id = 0;
               dev_id < phi::DeviceManager::GetDeviceCount(dev_type);
               ++dev_id) {
            InitNaiveBestFitCustomDeviceAllocator(
                platform::CustomPlace(dev_type, dev_id));
          }
        }
#endif
        break;
      }

      case AllocatorStrategy::kAutoGrowth: {
        InitNaiveBestFitCPUAllocator();
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
        allow_free_idle_chunk_ = allow_free_idle_chunk;
        for (int dev_id = 0; dev_id < platform::GetGPUDeviceCount(); ++dev_id) {
          InitAutoGrowthCUDAAllocator(platform::CUDAPlace(dev_id),
                                      allow_free_idle_chunk_);
        }

        // NOTE(Ruibiao): For the GPU multi-stream case without CUDA graph
        // capturing, the 'allocators_' map (place -> Allocator) holds the
        // StreamSafeCUDAAllocator related to the default stream (i.e., the
        // stream obtained directly from the DeviceContext), while the
        // 'cuda_allocators_' map (place -> map(stream -> Allocator)) holds
        // the StreamSafeCUDAAllocators related to non-default streams (i.e.,
        // the streams users pass in). The default-stream allocator is built
        // in the AllocatorFacadePrivate constructor, while non-default-stream
        // allocators are built lazily in the GetAllocator function with
        // 'create_if_not_found = true'.
        // The default stream is treated specially for performance reasons:
        // since most Alloc calls in an application target the default stream,
        // handling it separately avoids much of the overhead of acquiring the
        // default stream and taking the read-write lock.
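        //
        // For illustration (a hedged sketch, not executed here): a caller
        // allocating on a user-created stream goes through the facade, e.g.,
        //   AllocatorFacade::Instance().Alloc(place, size, phi::Stream(sid));
        // where `sid` is a hypothetical stream id; the call reaches
        // GetAllocator(place, stream, /*create_if_not_found=*/true) below.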
        if (FLAGS_use_stream_safe_cuda_allocator) {
          if (LIKELY(!IsCUDAGraphCapturing())) {
            WrapStreamSafeCUDAAllocatorForDefault();
          }
          is_stream_safe_cuda_allocator_used_ = true;
        }

        InitNaiveBestFitCUDAPinnedAllocator();
#endif
#ifdef PADDLE_WITH_ASCEND_CL
        for (int dev_id = 0; dev_id < platform::GetNPUDeviceCount(); ++dev_id) {
          InitNaiveBestFitNPUAllocator(platform::NPUPlace(dev_id));
        }
        InitNaiveBestFitNPUPinnedAllocator();
#endif
#ifdef PADDLE_WITH_XPU
        for (int dev_id = 0; dev_id < platform::GetXPUDeviceCount(); ++dev_id) {
          InitNaiveBestFitXPUAllocator(platform::XPUPlace(dev_id));
        }
#endif
#ifdef PADDLE_WITH_IPU
        for (int dev_id = 0; dev_id < platform::GetIPUDeviceCount(); ++dev_id) {
          InitNaiveBestFitIPUAllocator(platform::IPUPlace(dev_id));
        }
#endif
#ifdef PADDLE_WITH_MLU
        for (int dev_id = 0; dev_id < platform::GetMLUDeviceCount(); ++dev_id) {
          InitNaiveBestFitMLUAllocator(platform::MLUPlace(dev_id));
        }
#endif
#ifdef PADDLE_WITH_CUSTOM_DEVICE
        auto device_types = phi::DeviceManager::GetAllCustomDeviceTypes();
        for (const auto& dev_type : device_types) {
          for (size_t dev_id = 0;
               dev_id < phi::DeviceManager::GetDeviceCount(dev_type);
               ++dev_id) {
            InitAutoGrowthCustomDeviceAllocator(
                platform::CustomPlace(dev_type, dev_id), allow_free_idle_chunk);
          }
        }
#endif
        break;
      }

      case AllocatorStrategy::kThreadLocal: {
        InitNaiveBestFitCPUAllocator();
#ifdef PADDLE_WITH_XPU
        for (int dev_id = 0; dev_id < platform::GetXPUDeviceCount(); ++dev_id) {
          InitNaiveBestFitXPUAllocator(platform::XPUPlace(dev_id));
        }
#endif
#ifdef PADDLE_WITH_IPU
        for (int dev_id = 0; dev_id < platform::GetIPUDeviceCount(); ++dev_id) {
          InitNaiveBestFitIPUAllocator(platform::IPUPlace(dev_id));
        }
#endif
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
        for (int dev_id = 0; dev_id < platform::GetGPUDeviceCount(); ++dev_id) {
          InitThreadLocalCUDAAllocator(platform::CUDAPlace(dev_id));
        }
        InitNaiveBestFitCUDAPinnedAllocator();
#endif
#ifdef PADDLE_WITH_MLU
        for (int dev_id = 0; dev_id < platform::GetMLUDeviceCount(); ++dev_id) {
          InitNaiveBestFitMLUAllocator(platform::MLUPlace(dev_id));
        }
#endif
        break;
      }

      default: {
        PADDLE_THROW(platform::errors::InvalidArgument(
            "Unsupported allocator strategy: %d", static_cast<int>(strategy_)));
      }
    }
    InitZeroSizeAllocators();
    InitSystemAllocators();

    if (FLAGS_gpu_allocator_retry_time > 0) {
      WrapCUDARetryAllocator(FLAGS_gpu_allocator_retry_time);
    }

    WrapStatAllocator();

    CheckAllocThreadSafe();

#ifdef PADDLE_WITH_CUDA
    // No need to wrap CUDAGraphAllocator for StreamSafeCUDAAllocator
    if (!is_stream_safe_cuda_allocator_used_ &&
        UNLIKELY(IsCUDAGraphCapturing())) {
      WrapCUDAGraphAllocator();
    }
#endif
  }

  inline const std::shared_ptr<Allocator>& GetAllocator(
      const platform::Place& place, size_t size) {
    VLOG(6) << "GetAllocator"
            << " " << place << " " << size;
    const auto& allocators =
        (size > 0 ? (UNLIKELY(FLAGS_use_system_allocator) ? system_allocators_
                                                          : GetAllocatorMap())
                  : zero_size_allocators_);
    auto iter = allocators.find(place);
    PADDLE_ENFORCE_NE(iter,
                      allocators.end(),
                      platform::errors::NotFound(
                          "No allocator found for the place, %s", place));
    return iter->second;
  }

  void* GetBasePtr(const std::shared_ptr<phi::Allocation>& allocation) {
    return static_cast<Allocation*>(allocation.get())->base_ptr();
  }

  bool IsStreamSafeCUDAAllocatorUsed() {
    return is_stream_safe_cuda_allocator_used_ &&
           LIKELY(FLAGS_use_system_allocator == false);
  }

#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
  bool HasCUDAAllocator(const platform::CUDAPlace& place, gpuStream_t stream) {
    auto it = cuda_allocators_.find(place);
    if (it == cuda_allocators_.end()) {
      return false;
    }
    const std::map<gpuStream_t, std::shared_ptr<Allocator>>& allocator_map =
        it->second;
    return allocator_map.find(stream) != allocator_map.end();
  }

  const std::shared_ptr<Allocator>& GetAllocator(
      const platform::CUDAPlace& place,
      gpuStream_t stream,
      bool create_if_not_found = false) {
    if (LIKELY(!IsCUDAGraphCapturing())) {
      if (stream == GetDefaultStream(place)) {
        VLOG(7) << "Get Allocator by passing in a default stream";
        return GetAllocator(place, /* A non-zero num to choose allocator_ */ 1);
      }
    }

    /* shared_lock_guard */ {
      std::shared_lock<std::shared_timed_mutex> lock_guard(
          cuda_allocator_mutex_);
      if (LIKELY(HasCUDAAllocator(place, stream))) {
        return cuda_allocators_[place][stream];
      } else {
        PADDLE_ENFORCE_NE(create_if_not_found,
                          false,
                          platform::errors::NotFound(
                              "No allocator found for stream %s in place %s "
                              "with create_if_not_found = false",
                              stream,
                              place));
      }
    }

    /* unique_lock_guard */ {
      std::unique_lock<std::shared_timed_mutex> lock_guard(
          cuda_allocator_mutex_);
      InitStreamSafeCUDAAllocator(place, stream);
      return cuda_allocators_[place][stream];
    }
  }
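
  // Design note for GetAllocator above: the hot path takes a shared (reader)
  // lock and returns an existing allocator; only the first request for a
  // (place, stream) pair upgrades to a unique (writer) lock and builds the
  // allocator via InitStreamSafeCUDAAllocator.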

  const std::shared_ptr<StreamSafeCUDAAllocator>
  GetDefaultStreamSafeCUDAAllocator(const platform::CUDAPlace& place) const {
    const auto iter = default_stream_safe_cuda_allocators_.find(place);
    PADDLE_ENFORCE_NE(
        iter,
        default_stream_safe_cuda_allocators_.end(),
        platform::errors::NotFound(
            "No StreamSafeCUDAAllocator found for the place, %s", place));
    return iter->second;
  }

  gpuStream_t GetDefaultStream(const platform::CUDAPlace& place) const {
    const std::shared_ptr<StreamSafeCUDAAllocator>& allocator =
        GetDefaultStreamSafeCUDAAllocator(place);
    return allocator->GetDefaultStream();
  }

  void SetDefaultStream(const platform::CUDAPlace& place, gpuStream_t stream) {
    const std::shared_ptr<StreamSafeCUDAAllocator>& allocator =
        GetDefaultStreamSafeCUDAAllocator(place);

    PADDLE_ENFORCE_EQ(
        allocator->GetDefaultStream(),
        nullptr,
        platform::errors::Unavailable(
            "The default stream for StreamSafeCUDAAllocator(%p) in %s has "
            "already been set to %p; changing it to %p is not allowed.",
            allocator.get(),
            place,
            allocator->GetDefaultStream(),
            stream));

    allocator->SetDefaultStream(stream);
    VLOG(8) << "Set default stream to " << stream
            << " for StreamSafeCUDAAllocator(" << allocator.get() << ") in "
            << place;
  }

  void RecordStream(std::shared_ptr<phi::Allocation> allocation,
                    gpuStream_t stream) {
    std::shared_ptr<StreamSafeCUDAAllocation> stream_safe_cuda_allocation =
        std::dynamic_pointer_cast<StreamSafeCUDAAllocation>(allocation);
    if (stream_safe_cuda_allocation != nullptr) {
      stream_safe_cuda_allocation->RecordStream(stream);
    } else {
      VLOG(6) << "RecordStream for a non-StreamSafeCUDAAllocation";
    }
  }

  gpuStream_t GetStream(
      const std::shared_ptr<phi::Allocation>& allocation) const {
    const std::shared_ptr<StreamSafeCUDAAllocation>
        stream_safe_cuda_allocation =
            std::dynamic_pointer_cast<StreamSafeCUDAAllocation>(allocation);
    if (stream_safe_cuda_allocation != nullptr) {
      return stream_safe_cuda_allocation->GetOwningStream();
    }

    VLOG(6) << "GetStream for a non-StreamSafeCUDAAllocation";
    return static_cast<phi::GPUContext*>(
               platform::DeviceContextPool::Instance().Get(allocation->place()))
        ->stream();
  }
#endif

 private:
  class ZeroSizeAllocator : public Allocator {
   public:
    explicit ZeroSizeAllocator(platform::Place place) : place_(place) {}
    bool IsAllocThreadSafe() const override { return true; }

   protected:
    phi::Allocation* AllocateImpl(size_t size) override {
      return new Allocation(nullptr, 0, place_);
    }
    void FreeImpl(phi::Allocation* allocation) override { delete allocation; }

   private:
    platform::Place place_;
  };

  const AllocatorMap& GetAllocatorMap() { return allocators_; }

  void InitNaiveBestFitCPUAllocator() {
    // It is more efficient to use CPUAllocator directly.
    allocators_[platform::CPUPlace()] = std::make_shared<CPUAllocator>();
  }

#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
  void InitNaiveBestFitCUDAPinnedAllocator() {
    allocators_[platform::CUDAPinnedPlace()] =
        std::make_shared<NaiveBestFitAllocator>(platform::CUDAPinnedPlace());
  }

  void InitNaiveBestFitCUDAAllocator(platform::CUDAPlace p) {
    allocators_[p] = std::make_shared<NaiveBestFitAllocator>(p);
  }

  // Create a new CUDAAllocator or CUDAManagedAllocator for the given device
  std::shared_ptr<Allocator> CreateCUDAAllocator(platform::CUDAPlace p) {
    if (FLAGS_use_cuda_managed_memory) {
      PADDLE_ENFORCE_EQ(
          strategy_,
          AllocatorStrategy::kAutoGrowth,
          platform::errors::InvalidArgument(
              "CUDA managed memory is only implemented for the auto_growth "
              "strategy; the %s strategy is not supported.\n"
              "Please switch to the auto_growth strategy with `export "
              "FLAGS_allocator_strategy=\"auto_growth\"`, or disable managed "
              "memory with `export FLAGS_use_cuda_managed_memory=false`",
              FLAGS_allocator_strategy));

      if (!platform::IsGPUManagedMemorySupported(p.device)) {
        PADDLE_THROW(platform::errors::Unavailable(
            "Failed to create CUDAManagedAllocator on GPU %d.\n\n"
            "You have enabled CUDA managed memory, but the GPU device does "
            "not support allocating managed memory.\n"
            "If you don't actually need managed memory, disable it with "
            "`export FLAGS_use_cuda_managed_memory=false`.\n"
            "Otherwise, use a GPU device that supports managed memory.",
            p.device));
      }
      return std::make_shared<CUDAManagedAllocator>(p);
    }
    return std::make_shared<CUDAAllocator>(p);
  }

  void InitStreamSafeCUDAAllocator(platform::CUDAPlace p, gpuStream_t stream) {
    PADDLE_ENFORCE_EQ(
        strategy_,
        AllocatorStrategy::kAutoGrowth,
        platform::errors::Unimplemented(
            "Only the auto_growth strategy is supported for "
            "StreamSafeCUDAAllocator; allocator strategy %d does not support "
            "multi-stream",
            static_cast<int>(strategy_)));
    if (LIKELY(!HasCUDAAllocator(p, stream))) {
      VLOG(8) << "Init CUDA allocator for stream " << stream << " in place "
              << p;
      InitAutoGrowthCUDAAllocator(p, stream);
      WrapStreamSafeCUDAAllocator(p, stream);
      WrapCUDARetryAllocator(p, stream, FLAGS_gpu_allocator_retry_time);
      WrapStatAllocator(p, stream);
    }
  }

  void InitAutoGrowthCUDAAllocator(platform::CUDAPlace p, gpuStream_t stream) {
#if defined(PADDLE_WITH_HIP)
    auto cuda_allocator = CreateCUDAAllocator(p);
    cuda_allocators_[p][stream] = std::make_shared<AutoGrowthBestFitAllocator>(
        cuda_allocator, platform::GpuMinChunkSize(), 0, allow_free_idle_chunk_);
#endif

#if defined(PADDLE_WITH_CUDA)
#if CUDA_VERSION >= 10020
    CUdevice device;
    int val;
    try {
      PADDLE_ENFORCE_GPU_SUCCESS(
          paddle::platform::dynload::cuDeviceGet(&device, p.GetDeviceId()));

      PADDLE_ENFORCE_GPU_SUCCESS(
          paddle::platform::dynload::cuDeviceGetAttribute(
              &val,
              CU_DEVICE_ATTRIBUTE_VIRTUAL_ADDRESS_MANAGEMENT_SUPPORTED,
              device));
    } catch (...) {
      val = 0;
    }

    if (val > 0 && FLAGS_use_virtual_memory_auto_growth) {
      auto cuda_allocator = std::make_shared<CUDAVirtualMemAllocator>(p);
      cuda_allocators_[p][stream] =
          std::make_shared<VirtualMemoryAutoGrowthBestFitAllocator>(
              cuda_allocator, platform::GpuMinChunkSize(), p);
    } else {
      auto cuda_allocator = CreateCUDAAllocator(p);
      cuda_allocators_[p][stream] =
          std::make_shared<AutoGrowthBestFitAllocator>(
              cuda_allocator,
              platform::GpuMinChunkSize(),
              /*chunk_size=*/0,
              allow_free_idle_chunk_);
    }
#else
    auto cuda_allocator = CreateCUDAAllocator(p);
    auto alignment = platform::GpuMinChunkSize();
    bool need_addr_align = true;
    // NOTE: Sometimes, since the CUDA runtime cannot be forked, calling any
    // CUDA API in a forked process may return CUDA error 3, i.e.,
    // cudaErrorInitializationError, even though the CUDAAllocator is only
    // initialized but not really used.
    // The try-catch block here handles the case where GetDeviceProperties()
    // may fail in multi-process scenarios (for example, in a DataLoader with
    // num_workers > 0).
    try {
      const auto& prop = platform::GetDeviceProperties(p.GetDeviceId());
      need_addr_align = prop.textureAlignment < alignment;
      VLOG(4) << "GetDeviceProperties ok, textureAlignment: "
              << prop.textureAlignment
              << ", set need_addr_align=" << need_addr_align;
    } catch (...) {
      need_addr_align = true;
      VLOG(4) << "GetDeviceProperties failed, set need_addr_align=true";
    }
    // The address returned is aligned already,
    // ref:
    // https://stackoverflow.com/questions/14082964/cuda-alignment-256bytes-seriously/14083295#14083295
    std::shared_ptr<Allocator> underlying_allocator{nullptr};
    if (need_addr_align) {
      VLOG(10) << "use AlignedAllocator with alignment: " << alignment;
      // Wrap the CUDA allocator, not the still-null underlying_allocator.
      underlying_allocator =
          std::make_shared<AlignedAllocator>(cuda_allocator, alignment);
    } else {
      VLOG(10) << "not use AlignedAllocator with alignment: " << alignment;
      underlying_allocator = cuda_allocator;
    }

    cuda_allocators_[p][stream] = std::make_shared<AutoGrowthBestFitAllocator>(
        underlying_allocator, alignment, 0, allow_free_idle_chunk_);
#endif
#endif
  }

  // NOTE(Ruibiao): Old single-stream version, will be removed later
  void InitAutoGrowthCUDAAllocator(platform::CUDAPlace p,
                                   bool allow_free_idle_chunk) {
#if defined(PADDLE_WITH_HIP)
    auto cuda_allocator = CreateCUDAAllocator(p);
    allocators_[p] = std::make_shared<AutoGrowthBestFitAllocator>(
        cuda_allocator,
        platform::GpuMinChunkSize(),
        /*chunk_size=*/0,
        allow_free_idle_chunk);
#endif

#if defined(PADDLE_WITH_CUDA)
#if CUDA_VERSION >= 10020
    CUdevice device;
    int val;
    try {
      PADDLE_ENFORCE_GPU_SUCCESS(
          paddle::platform::dynload::cuDeviceGet(&device, p.GetDeviceId()));

      PADDLE_ENFORCE_GPU_SUCCESS(
          paddle::platform::dynload::cuDeviceGetAttribute(
              &val,
              CU_DEVICE_ATTRIBUTE_VIRTUAL_ADDRESS_MANAGEMENT_SUPPORTED,
              device));
    } catch (...) {
      val = 0;
    }

    if (val > 0 && FLAGS_use_virtual_memory_auto_growth) {
      auto cuda_allocator = std::make_shared<CUDAVirtualMemAllocator>(p);
      allocators_[p] =
          std::make_shared<VirtualMemoryAutoGrowthBestFitAllocator>(
              cuda_allocator, platform::GpuMinChunkSize(), p);
    } else {
      auto cuda_allocator = CreateCUDAAllocator(p);
      allocators_[p] = std::make_shared<AutoGrowthBestFitAllocator>(
          cuda_allocator,
          platform::GpuMinChunkSize(),
          /*chunk_size=*/0,
          allow_free_idle_chunk);
    }

#else
    auto cuda_allocator = CreateCUDAAllocator(p);
    auto alignment = platform::GpuMinChunkSize();
    bool need_addr_align = true;
    // NOTE: Sometimes, since the CUDA runtime cannot be forked, calling any
    // CUDA API in a forked process may return CUDA error 3, i.e.,
    // cudaErrorInitializationError, even though the CUDAAllocator is only
    // initialized but not really used.
    // The try-catch block here handles the case where GetDeviceProperties()
    // may fail in multi-process scenarios (for example, in a DataLoader with
    // num_workers > 0).
    try {
      const auto& prop = platform::GetDeviceProperties(p.GetDeviceId());
      need_addr_align = prop.textureAlignment < alignment;
      VLOG(4) << "GetDeviceProperties ok, textureAlignment: "
              << prop.textureAlignment
              << ", set need_addr_align=" << need_addr_align;
    } catch (...) {
      need_addr_align = true;
      VLOG(4) << "GetDeviceProperties failed, set need_addr_align=true";
    }
    // The address returned is aligned already,
    // ref:
    // https://stackoverflow.com/questions/14082964/cuda-alignment-256bytes-seriously/14083295#14083295
    std::shared_ptr<Allocator> underlying_allocator{nullptr};
    if (need_addr_align) {
      VLOG(10) << "use AlignedAllocator with alignment: " << alignment;
      // Wrap the CUDA allocator, not the still-null underlying_allocator.
      underlying_allocator =
          std::make_shared<AlignedAllocator>(cuda_allocator, alignment);
    } else {
      VLOG(10) << "not use AlignedAllocator with alignment: " << alignment;
      underlying_allocator = cuda_allocator;
    }
    allocators_[p] = std::make_shared<AutoGrowthBestFitAllocator>(
        underlying_allocator, alignment, 0, allow_free_idle_chunk);
#endif
#endif
  }

  void InitThreadLocalCUDAAllocator(platform::CUDAPlace p) {
    allocators_[p] = std::make_shared<ThreadLocalCUDAAllocator>(p);
  }

  void WrapStreamSafeCUDAAllocator(platform::CUDAPlace p, gpuStream_t stream) {
    std::shared_ptr<Allocator>& allocator = cuda_allocators_[p][stream];
    allocator = std::make_shared<StreamSafeCUDAAllocator>(
        allocator,
        p,
        stream,
        /* in_cuda_graph_capturing = */ !allow_free_idle_chunk_);
  }

  void WrapStreamSafeCUDAAllocatorForDefault() {
    for (auto& pair : allocators_) {
      auto& place = pair.first;
      if (platform::is_gpu_place(place)) {
        std::shared_ptr<StreamSafeCUDAAllocator>&& allocator =
            std::make_shared<StreamSafeCUDAAllocator>(
                pair.second,
                place,
                /* default_stream = */ nullptr,
                /* in_cuda_graph_capturing = */ !allow_free_idle_chunk_);
        pair.second = allocator;

        // NOTE(Ruibiao): A tricky implementation that lets the
        // StreamSafeCUDAAllocator interact with the outside world, i.e.,
        // have its default stream changed from outside.
        default_stream_safe_cuda_allocators_[place] = allocator;
        VLOG(8) << "WrapStreamSafeCUDAAllocator for " << place
                << ", allocator address = " << pair.second.get();
      }
    }
  }

  void WrapCUDARetryAllocator(platform::CUDAPlace p,
                              gpuStream_t stream,
                              size_t retry_time) {
    PADDLE_ENFORCE_GT(
        retry_time,
        0,
        platform::errors::InvalidArgument(
            "Retry time should be larger than 0, but got %d", retry_time));
    std::shared_ptr<Allocator>& allocator = cuda_allocators_[p][stream];
    allocator = std::make_shared<RetryAllocator>(allocator, retry_time);
  }

  void WrapStatAllocator(platform::CUDAPlace p, gpuStream_t stream) {
    std::shared_ptr<Allocator>& allocator = cuda_allocators_[p][stream];
    allocator = std::make_shared<StatAllocator>(allocator);
  }

#ifdef PADDLE_WITH_CUDA
  void WrapCUDAGraphAllocator() {
    for (auto& item : allocators_) {
      auto& allocator = item.second;
      allocator = CUDAGraphAllocator::Create(allocator);
    }
  }
#endif

  static void CheckCUDAAllocThreadSafe(const CUDAAllocatorMap& allocators) {
    for (auto& place_pair : allocators) {
      for (auto& stream_pair : place_pair.second) {
        PADDLE_ENFORCE_EQ(stream_pair.second->IsAllocThreadSafe(),
                          true,
                          platform::errors::InvalidArgument(
                              "Public allocators must be thread safe"));
      }
    }
  }
#endif

#ifdef PADDLE_WITH_XPU
  void InitNaiveBestFitXPUAllocator(platform::XPUPlace p) {
    allocators_[p] = std::make_shared<NaiveBestFitAllocator>(p);
  }
#endif

#ifdef PADDLE_WITH_IPU
  void InitNaiveBestFitIPUAllocator(platform::IPUPlace p) {
    allocators_[p] = std::make_shared<NaiveBestFitAllocator>(p);
  }
#endif

#ifdef PADDLE_WITH_MLU
  void InitNaiveBestFitMLUAllocator(platform::MLUPlace p) {
    allocators_[p] = std::make_shared<NaiveBestFitAllocator>(p);
  }
#endif

#ifdef PADDLE_WITH_ASCEND_CL
  void InitNaiveBestFitNPUAllocator(platform::NPUPlace p) {
    allocators_[p] = std::make_shared<NaiveBestFitAllocator>(p);
  }

  void InitNaiveBestFitNPUPinnedAllocator() {
    allocators_[platform::NPUPinnedPlace()] =
        std::make_shared<paddle::memory::allocation::NPUPinnedAllocator>();
  }
#endif

#ifdef PADDLE_WITH_CUSTOM_DEVICE
  void InitNaiveBestFitCustomDeviceAllocator(platform::CustomPlace p) {
    allocators_[p] = std::make_shared<NaiveBestFitAllocator>(p);
  }

  void InitAutoGrowthCustomDeviceAllocator(platform::CustomPlace p,
                                           bool allow_free_idle_chunk) {
    auto custom_allocator =
        std::make_shared<paddle::memory::allocation::CustomAllocator>(p);
    allocators_[p] = std::make_shared<AutoGrowthBestFitAllocator>(
        custom_allocator,
        phi::DeviceManager::GetMinChunkSize(p),
        /*chunk_size=*/0,
        allow_free_idle_chunk);
  }
#endif

  void InitSystemAllocators() {
    if (!system_allocators_.empty()) return;
    system_allocators_[platform::CPUPlace()] = std::make_shared<CPUAllocator>();
#ifdef PADDLE_WITH_XPU
    int device_count = platform::GetXPUDeviceCount();
    for (int i = 0; i < device_count; ++i) {
      platform::XPUPlace p(i);
      system_allocators_[p] = std::make_shared<NaiveBestFitAllocator>(p);
    }
#endif
#ifdef PADDLE_WITH_IPU
    int device_count = platform::GetIPUDeviceCount();
    for (int i = 0; i < device_count; ++i) {
      platform::IPUPlace p(i);
      system_allocators_[p] = std::make_shared<NaiveBestFitAllocator>(p);
    }
#endif
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
    system_allocators_[platform::CUDAPinnedPlace()] =
        std::make_shared<CPUPinnedAllocator>();
    int device_count = platform::GetGPUDeviceCount();
    for (int i = 0; i < device_count; ++i) {
      platform::CUDAPlace p(i);
      system_allocators_[p] = CreateCUDAAllocator(p);
    }
#endif
#ifdef PADDLE_WITH_MLU
    int device_count = platform::GetMLUDeviceCount();
    for (int i = 0; i < device_count; ++i) {
      platform::MLUPlace p(i);
      system_allocators_[p] = std::make_shared<NaiveBestFitAllocator>(p);
    }
#endif
#ifdef PADDLE_WITH_CUSTOM_DEVICE
    auto device_types = phi::DeviceManager::GetAllCustomDeviceTypes();
    for (const auto& dev_type : device_types) {
      for (size_t dev_id = 0;
           dev_id < phi::DeviceManager::GetDeviceCount(dev_type);
           dev_id++) {
        platform::CustomPlace p(dev_type, dev_id);
        system_allocators_[p] = std::make_shared<NaiveBestFitAllocator>(p);
      }
    }
#endif
  }

  void InitZeroSizeAllocators() {
    if (!zero_size_allocators_.empty()) return;
    std::vector<platform::Place> places;
    places.emplace_back(platform::CPUPlace());
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
    int device_count = platform::GetGPUDeviceCount();
    for (int dev_id = 0; dev_id < device_count; ++dev_id) {
      places.emplace_back(platform::CUDAPlace(dev_id));
    }
    places.emplace_back(platform::CUDAPinnedPlace());
#endif
#ifdef PADDLE_WITH_XPU
    int device_count = platform::GetXPUDeviceCount();
    for (int dev_id = 0; dev_id < device_count; ++dev_id) {
      places.emplace_back(platform::XPUPlace(dev_id));
    }
#endif
#ifdef PADDLE_WITH_ASCEND_CL
    int device_count = platform::GetNPUDeviceCount();
    for (int dev_id = 0; dev_id < device_count; ++dev_id) {
      places.emplace_back(platform::NPUPlace(dev_id));
    }
#endif
#ifdef PADDLE_WITH_IPU
    int device_count = platform::GetIPUDeviceCount();
    for (int dev_id = 0; dev_id < device_count; ++dev_id) {
      places.emplace_back(platform::IPUPlace(dev_id));
    }
#endif
#ifdef PADDLE_WITH_MLU
    int device_count = platform::GetMLUDeviceCount();
    for (int dev_id = 0; dev_id < device_count; ++dev_id) {
      places.emplace_back(platform::MLUPlace(dev_id));
    }
#endif
#ifdef PADDLE_WITH_CUSTOM_DEVICE
    auto device_types = phi::DeviceManager::GetAllCustomDeviceTypes();
    for (const auto& dev_type : device_types) {
      for (size_t dev_id = 0;
           dev_id < phi::DeviceManager::GetDeviceCount(dev_type);
           dev_id++) {
        places.emplace_back(platform::CustomPlace(dev_type, dev_id));
      }
    }
#endif

    for (auto& p : places) {
      zero_size_allocators_[p] = std::make_shared<ZeroSizeAllocator>(p);
    }
  }

  static void CheckAllocThreadSafe(const AllocatorMap& allocators) {
    for (auto& pair : allocators) {
      PADDLE_ENFORCE_EQ(pair.second->IsAllocThreadSafe(),
                        true,
                        platform::errors::InvalidArgument(
                            "Public allocators must be thread safe"));
    }
  }

  void CheckAllocThreadSafe() const {
    CheckAllocThreadSafe(allocators_);
    CheckAllocThreadSafe(zero_size_allocators_);
    CheckAllocThreadSafe(system_allocators_);
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
    if (is_stream_safe_cuda_allocator_used_) {
      CheckCUDAAllocThreadSafe(cuda_allocators_);
    }
#endif
  }

  void WrapCUDARetryAllocator(size_t retry_time) {
    PADDLE_ENFORCE_GT(
        retry_time,
        0,
        platform::errors::InvalidArgument(
            "Retry time should be larger than 0, but got %d", retry_time));
    for (auto& pair : allocators_) {
      if (platform::is_gpu_place(pair.first)) {
        pair.second = std::make_shared<RetryAllocator>(pair.second, retry_time);
      }
    }
  }

  void WrapStatAllocator() {
    for (auto& pair : allocators_) {
      // Memory stats are currently only supported for CPU and GPU places
      const platform::Place& place = pair.first;
      if (platform::is_cpu_place(place) ||
          platform::is_cuda_pinned_place(place) ||
          platform::is_gpu_place(place)) {
        pair.second = std::make_shared<StatAllocator>(pair.second);
      }
    }
  }

#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
  // A standalone CUDA allocator to support multi-stream GC in the new executor
  std::map<platform::Place, std::shared_ptr<StreamSafeCUDAAllocator>>
      default_stream_safe_cuda_allocators_;
  CUDAAllocatorMap cuda_allocators_;
  std::shared_timed_mutex cuda_allocator_mutex_;
#endif
  AllocatorStrategy strategy_;
  AllocatorMap allocators_;
  static AllocatorMap zero_size_allocators_;
  static AllocatorMap system_allocators_;
  bool allow_free_idle_chunk_;
  bool is_stream_safe_cuda_allocator_used_;
};
AllocatorFacadePrivate::AllocatorMap
    AllocatorFacadePrivate::zero_size_allocators_;
AllocatorFacadePrivate::AllocatorMap AllocatorFacadePrivate::system_allocators_;

// Pimpl. Make interface clean.
AllocatorFacade::AllocatorFacade() : m_(new AllocatorFacadePrivate()) {}
// Deleting m_ may cause a core dump when Python's teardown order conflicts
// with C++ destruction, so m_ is deliberately never freed.
AllocatorFacade::~AllocatorFacade() {}

AllocatorFacade& AllocatorFacade::Instance() {
  static AllocatorFacade* instance = new AllocatorFacade;
  return *instance;
}

AllocatorFacadePrivate* AllocatorFacade::GetPrivate() const {
#ifdef PADDLE_WITH_CUDA
  if (UNLIKELY(IsCUDAGraphCapturing())) {
    auto id = phi::backends::gpu::CUDAGraph::CapturingPoolID();
    auto iter = cuda_graph_map_.find(id);
    PADDLE_ENFORCE_NE(
        iter,
        cuda_graph_map_.end(),
        platform::errors::PermissionDenied(
            "No memory pool is prepared for CUDA Graph capturing."));
    VLOG(10) << "Choose CUDA Graph memory pool";
    return iter->second.get();
  }
#endif
  return m_;
}

const std::shared_ptr<Allocator>& AllocatorFacade::GetAllocator(
    const platform::Place& place) {
  return GetPrivate()->GetAllocator(
      place, /* A non-zero num to choose allocator_ */ 1);
}

void* AllocatorFacade::GetBasePtr(
    const std::shared_ptr<phi::Allocation>& allocation) {
  PADDLE_ENFORCE_EQ(GetAllocatorStrategy(),
                    AllocatorStrategy::kAutoGrowth,
                    paddle::platform::errors::Unimplemented(
                        "GetBasePtr() is only implemented for the auto_growth "
                        "strategy; allocator strategy %d is not supported",
                        static_cast<int>(GetAllocatorStrategy())));
  PADDLE_ENFORCE_EQ(platform::is_gpu_place(allocation->place()),
                    true,
                    paddle::platform::errors::Unimplemented(
                        "GetBasePtr() is only implemented for CUDAPlace(); "
                        "place %s is not supported",
                        allocation->place()));
  return GetPrivate()->GetBasePtr(allocation);
}

const std::shared_ptr<Allocator>& AllocatorFacade::GetZeroAllocator(
    const platform::Place& place) {
  return GetPrivate()->GetAllocator(place, /* zero size */ 0);
}

std::shared_ptr<phi::Allocation> AllocatorFacade::AllocShared(
    const platform::Place& place, size_t size) {
  return std::shared_ptr<phi::Allocation>(Alloc(place, size));
}

AllocationPtr AllocatorFacade::Alloc(const platform::Place& place,
                                     size_t size) {
  return GetPrivate()->GetAllocator(place, size)->Allocate(size);
}

uint64_t AllocatorFacade::Release(const platform::Place& place) {
  return GetPrivate()
      ->GetAllocator(place, /* A non-zero num to choose allocator_ */ 1)
      ->Release(place);
}

std::shared_ptr<phi::Allocation> AllocatorFacade::AllocShared(
    const platform::Place& place, size_t size, const phi::Stream& stream) {
  return std::shared_ptr<phi::Allocation>(Alloc(place, size, stream));
}
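
// Usage sketch (hedged; `size` and `stream_id` are illustrative values):
//   auto allocation = AllocatorFacade::Instance().AllocShared(
//       platform::CUDAPlace(0), size, phi::Stream(stream_id));
// The stream-aware overload routes the request to the per-stream
// StreamSafeCUDAAllocator, creating it on demand.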

AllocationPtr AllocatorFacade::Alloc(const platform::Place& place,
                                     size_t size,
                                     const phi::Stream& stream) {
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
  AllocatorFacadePrivate* m = GetPrivate();
  if (!m->IsStreamSafeCUDAAllocatorUsed()) {
    VLOG(6) << "Warning: StreamSafeCUDAAllocator is not used!";
    return Alloc(place, size);
  }

  platform::CUDAPlace p(place.GetDeviceId());
  if (LIKELY(size > 0 && FLAGS_use_system_allocator == false)) {
    gpuStream_t s = reinterpret_cast<gpuStream_t>(stream.id());
    return m->GetAllocator(p, s, /* create_if_not_found = */ true)
        ->Allocate(size);
  } else {
    return m->GetAllocator(p, size)->Allocate(size);
  }
#elif defined(PADDLE_WITH_XPU) || defined(PADDLE_WITH_ASCEND_CL)
  return GetAllocator(place)->Allocate(size);
#else
  PADDLE_THROW(platform::errors::PreconditionNotMet(
      "Not compiled with GPU, XPU, or NPU."));
#endif
}

bool AllocatorFacade::InSameStream(
    const std::shared_ptr<phi::Allocation>& allocation,
    const phi::Stream& stream) {
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
  gpuStream_t s = reinterpret_cast<gpuStream_t>(stream.id());
  return s == GetStream(allocation);
#else
  PADDLE_THROW(platform::errors::PreconditionNotMet("Not compiled with GPU."));
#endif
}

bool AllocatorFacade::IsStreamSafeCUDAAllocatorUsed() {
  return GetPrivate()->IsStreamSafeCUDAAllocatorUsed();
}

#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
uint64_t AllocatorFacade::Release(const platform::CUDAPlace& place,
                                  gpuStream_t stream) {
  AllocatorFacadePrivate* m = GetPrivate();
  if (!m->IsStreamSafeCUDAAllocatorUsed()) {
    VLOG(6) << "Warning: StreamSafeCUDAAllocator is not used!";
    return Release(place);
  }

  return m->GetAllocator(place, stream)->Release(place);
}

void AllocatorFacade::RecordStream(std::shared_ptr<phi::Allocation> allocation,
                                   gpuStream_t stream) {
  GetPrivate()->RecordStream(allocation, stream);
}
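
// Usage sketch (hedged; names are illustrative): after launching a kernel on
// a second stream that reads a buffer owned by another stream, record the
// extra stream so the buffer is not released while that kernel may still be
// running:
//   AllocatorFacade::Instance().RecordStream(allocation, other_stream);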

const std::shared_ptr<Allocator>& AllocatorFacade::GetAllocator(
    const platform::Place& place, gpuStream_t stream) {
  AllocatorFacadePrivate* m = GetPrivate();

  if (!m->IsStreamSafeCUDAAllocatorUsed()) {
    VLOG(6) << "Warning: StreamSafeCUDAAllocator is not used!";
    return GetAllocator(place);
  }

  if (platform::is_gpu_place(place) && FLAGS_use_system_allocator == false) {
    return m->GetAllocator(place,
                           stream,
                           /*create_if_not_found=*/true);
  }
  return m->GetAllocator(place, /* A non-zero num to choose allocator_ */ 1);
}

gpuStream_t AllocatorFacade::GetStream(
    const std::shared_ptr<phi::Allocation>& allocation) const {
  return GetPrivate()->GetStream(allocation);
}

void AllocatorFacade::SetDefaultStream(const platform::CUDAPlace& place,
                                       gpuStream_t stream) {
  if (m_->IsStreamSafeCUDAAllocatorUsed()) {
    m_->SetDefaultStream(place, stream);
  }
}

#ifdef PADDLE_WITH_CUDA
void AllocatorFacade::PrepareMemoryPoolForCUDAGraph(int64_t id) {
  PADDLE_ENFORCE_EQ(GetAllocatorStrategy(),
                    AllocatorStrategy::kAutoGrowth,
                    platform::errors::InvalidArgument(
                        "CUDA Graph is only supported when "
                        "FLAGS_allocator_strategy=\"auto_growth\", but got "
                        "FLAGS_allocator_strategy=\"%s\"",
                        FLAGS_allocator_strategy));
  auto& allocator = cuda_graph_map_[id];
  auto& ref_cnt = cuda_graph_ref_cnt_[id];
  if (allocator.get() == nullptr) {
    allocator.reset(
        new AllocatorFacadePrivate(/*allow_free_idle_chunk=*/false));
    VLOG(10) << "Create memory pool for CUDA Graph with memory ID " << id;
  } else {
    VLOG(10) << "Use created memory pool for CUDA Graph with memory ID " << id;
  }
  ++ref_cnt;
}

void AllocatorFacade::RemoveMemoryPoolOfCUDAGraph(int64_t id) {
  auto ref_cnt_iter = cuda_graph_ref_cnt_.find(id);
  PADDLE_ENFORCE_NE(ref_cnt_iter,
                    cuda_graph_ref_cnt_.end(),
                    platform::errors::InvalidArgument(
                        "Cannot find CUDA Graph with memory ID = %d", id));
  auto& ref_cnt = ref_cnt_iter->second;
  --ref_cnt;
  if (ref_cnt == 0) {
    cuda_graph_map_.erase(id);
    cuda_graph_ref_cnt_.erase(ref_cnt_iter);
    VLOG(10) << "Remove memory pool of CUDA Graph with memory ID " << id;
  } else {
    VLOG(10) << "Decrease memory pool ID " << id << " reference count to "
             << ref_cnt;
  }
}
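
// Usage sketch (hedged; `pool_id` is an illustrative memory pool ID): callers
// that capture CUDA Graphs are expected to bracket capture with these calls:
//   AllocatorFacade::Instance().PrepareMemoryPoolForCUDAGraph(pool_id);
//   /* capture and replay the graph */
//   AllocatorFacade::Instance().RemoveMemoryPoolOfCUDAGraph(pool_id);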
#endif
#endif

UNUSED static std::shared_ptr<NaiveBestFitAllocator> unused_obj =
    std::make_shared<NaiveBestFitAllocator>(platform::CPUPlace());

}  // namespace allocation
}  // namespace memory
}  // namespace paddle