// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/fluid/memory/allocation/allocator_facade.h"

#include "gflags/gflags.h"
#include "paddle/fluid/memory/allocation/aligned_allocator.h"
#include "paddle/fluid/memory/allocation/allocator.h"
#include "paddle/fluid/memory/allocation/allocator_strategy.h"
#include "paddle/fluid/memory/allocation/auto_growth_best_fit_allocator.h"
#include "paddle/fluid/memory/allocation/cpu_allocator.h"
#include "paddle/fluid/memory/allocation/naive_best_fit_allocator.h"
#include "paddle/fluid/memory/allocation/retry_allocator.h"
#include "paddle/fluid/memory/allocation/stat_allocator.h"
#include "paddle/fluid/platform/enforce.h"
#include "paddle/fluid/platform/place.h"

#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
#include <shared_mutex>

#include "paddle/fluid/memory/allocation/cuda_allocator.h"
#include "paddle/fluid/memory/allocation/cuda_managed_allocator.h"
#include "paddle/fluid/memory/allocation/pinned_allocator.h"
#include "paddle/fluid/memory/allocation/stream_safe_cuda_allocator.h"
#include "paddle/fluid/memory/allocation/thread_local_allocator.h"
#include "paddle/fluid/platform/device/gpu/gpu_info.h"
#include "paddle/fluid/platform/device_context.h"
#include "paddle/phi/backends/gpu/gpu_context.h"

#ifdef PADDLE_WITH_CUDA
#include "paddle/fluid/platform/device/gpu/cuda/cuda_graph.h"
#endif

#if CUDA_VERSION >= 10020
#include "paddle/fluid/memory/allocation/cuda_virtual_mem_allocator.h"
#include "paddle/fluid/memory/allocation/virtual_memory_auto_growth_best_fit_allocator.h"
#include "paddle/fluid/platform/dynload/cuda_driver.h"
#endif
#endif

#ifdef PADDLE_WITH_XPU
#include "paddle/fluid/platform/device/xpu/xpu_info.h"
#endif

#ifdef PADDLE_WITH_ASCEND_CL
#include "paddle/fluid/memory/allocation/npu_pinned_allocator.h"
#endif

#ifdef PADDLE_WITH_IPU
#include "paddle/fluid/platform/device/ipu/ipu_info.h"
#endif

#ifdef PADDLE_WITH_MLU
#include "paddle/fluid/platform/device/mlu/mlu_info.h"
#endif

#ifdef PADDLE_WITH_CUSTOM_DEVICE
#include "paddle/fluid/memory/allocation/custom_allocator.h"
#include "paddle/fluid/platform/device/device_wrapper.h"
#endif

PADDLE_DEFINE_EXPORTED_int64(
    gpu_allocator_retry_time, 10000,
    "The retry time (milliseconds) when the allocator fails "
    "to allocate memory. No retry if this value is not greater than 0");

PADDLE_DEFINE_EXPORTED_bool(
    use_system_allocator, false,
    "Whether to use the system allocator to allocate CPU and GPU memory. "
    "Only used for unittests.");

PADDLE_DEFINE_EXPORTED_bool(use_virtual_memory_auto_growth, false,
                            "Use VirtualMemoryAutoGrowthBestFitAllocator.");

// NOTE(Ruibiao): This flag is only kept for compatibility with the old
// single-stream CUDA allocator. It will be removed once
// StreamSafeCudaAllocator has been fully tested.
PADDLE_DEFINE_EXPORTED_bool(use_stream_safe_cuda_allocator, true,
                            "Enable StreamSafeCUDAAllocator");

PADDLE_DEFINE_EXPORTED_bool(use_cuda_managed_memory, false,
                            "Whether to use CUDAManagedAllocator to allocate "
                            "managed memory, only available for auto_growth "
                            "strategy");

DECLARE_string(allocator_strategy);

namespace paddle {
namespace memory {
namespace allocation {

#ifdef PADDLE_WITH_CUDA
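// Decorates an existing allocator while a CUDA Graph is being captured: each
// allocation it returns keeps both this allocator and the underlying
// allocation alive, so memory recorded in the graph is not handed back to
// the underlying allocator while the allocation is still in use.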
class CUDAGraphAllocator
    : public Allocator,
      public std::enable_shared_from_this<CUDAGraphAllocator> {
 private:
  class PrivateAllocation : public Allocation {
   public:
    PrivateAllocation(CUDAGraphAllocator* allocator,
                      DecoratedAllocationPtr underlying_allocation)
        : Allocation(
              underlying_allocation->ptr(), underlying_allocation->base_ptr(),
              underlying_allocation->size(), underlying_allocation->place()),
          allocator_(allocator->shared_from_this()),
          underlying_allocation_(std::move(underlying_allocation)) {}

   private:
    std::shared_ptr<Allocator> allocator_;
    DecoratedAllocationPtr underlying_allocation_;
  };

  explicit CUDAGraphAllocator(const std::shared_ptr<Allocator>& allocator)
      : underlying_allocator_(allocator) {}

 public:
  ~CUDAGraphAllocator() { VLOG(10) << "CUDAGraphAllocator destructed"; }

  static std::shared_ptr<Allocator> Create(
      const std::shared_ptr<Allocator>& allocator) {
    return std::shared_ptr<Allocator>(new CUDAGraphAllocator(allocator));
  }

 protected:
  phi::Allocation* AllocateImpl(size_t size) {
    VLOG(10) << "Allocate " << size << " for CUDA Graph";
    return new PrivateAllocation(this,
                                 static_unique_ptr_cast<Allocation>(
                                     underlying_allocator_->Allocate(size)));
  }

  void FreeImpl(phi::Allocation* allocation) {
    VLOG(10) << "delete for CUDA Graph";
    delete allocation;
  }

 private:
  std::shared_ptr<Allocator> underlying_allocator_;
};
#endif

static bool IsCUDAGraphCapturing() {
#ifdef PADDLE_WITH_CUDA
  return UNLIKELY(platform::CUDAGraph::IsThisThreadCapturing());
#else
  return false;
#endif
}

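// Holds all concrete allocators and the dispatch logic among them. One
// instance backs the AllocatorFacade singleton; extra instances act as
// standalone memory pools for CUDA Graph capturing.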
class AllocatorFacadePrivate {
 public:
  using AllocatorMap = std::map<platform::Place, std::shared_ptr<Allocator>>;

#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
  using CUDAAllocatorMap =
      std::map<platform::CUDAPlace,
               std::map<gpuStream_t, std::shared_ptr<Allocator>>>;
#endif

  explicit AllocatorFacadePrivate(bool allow_free_idle_chunk = true) {
    strategy_ = GetAllocatorStrategy();
    is_stream_safe_cuda_allocator_used_ = false;

    switch (strategy_) {
      case AllocatorStrategy::kNaiveBestFit: {
        InitNaiveBestFitCPUAllocator();
#ifdef PADDLE_WITH_IPU
        for (int dev_id = 0; dev_id < platform::GetIPUDeviceCount(); ++dev_id) {
          InitNaiveBestFitIPUAllocator(platform::IPUPlace(dev_id));
        }
#endif
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
        for (int dev_id = 0; dev_id < platform::GetGPUDeviceCount(); ++dev_id) {
          InitNaiveBestFitCUDAAllocator(platform::CUDAPlace(dev_id));
        }
        InitNaiveBestFitCUDAPinnedAllocator();
#endif
#ifdef PADDLE_WITH_XPU
        for (int dev_id = 0; dev_id < platform::GetXPUDeviceCount(); ++dev_id) {
          InitNaiveBestFitXPUAllocator(platform::XPUPlace(dev_id));
        }
#endif
#ifdef PADDLE_WITH_ASCEND_CL
        for (int dev_id = 0; dev_id < platform::GetNPUDeviceCount(); ++dev_id) {
          InitNaiveBestFitNPUAllocator(platform::NPUPlace(dev_id));
        }
        InitNaiveBestFitNPUPinnedAllocator();
#endif
#ifdef PADDLE_WITH_MLU
        for (int dev_id = 0; dev_id < platform::GetMLUDeviceCount(); ++dev_id) {
          InitNaiveBestFitMLUAllocator(platform::MLUPlace(dev_id));
        }
#endif
#ifdef PADDLE_WITH_CUSTOM_DEVICE
        auto device_types = phi::DeviceManager::GetAllCustomDeviceTypes();
        for (const auto& dev_type : device_types) {
          for (size_t dev_id = 0;
               dev_id < phi::DeviceManager::GetDeviceCount(dev_type);
               ++dev_id) {
            InitNaiveBestFitCustomDeviceAllocator(
                platform::CustomPlace(dev_type, dev_id));
          }
        }
#endif
        break;
      }

      case AllocatorStrategy::kAutoGrowth: {
        InitNaiveBestFitCPUAllocator();
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
        allow_free_idle_chunk_ = allow_free_idle_chunk;
        for (int dev_id = 0; dev_id < platform::GetGPUDeviceCount(); ++dev_id) {
          InitAutoGrowthCUDAAllocator(platform::CUDAPlace(dev_id),
                                      allow_free_idle_chunk_);
        }

        // Note(Ruibiao): For the GPU multi-stream case without CUDA graph
        // capturing, the 'allocators_' map (place -> Allocator) holds the
        // StreamSafeCUDAAllocator related to the default stream (i.e., the
        // stream directly got from the DeviceContext), while the
        // 'cuda_allocators_' map (place -> map(stream -> Allocator)) holds
        // the StreamSafeCUDAAllocators related to non-default streams (i.e.,
        // the streams users pass in). The default stream Allocator is built
        // into the structure of AllocatorFacadePrivate, while non-default
        // stream Allocators are built lazily in the GetAllocator function
        // with 'create_if_not_found = true'. The default stream is treated
        // specially for performance reasons: since most Alloc calls in an
        // application are for the default stream, handling it separately
        // avoids a lot of overhead from acquiring the default stream and
        // taking the read-write lock.
        if (FLAGS_use_stream_safe_cuda_allocator) {
          if (LIKELY(!IsCUDAGraphCapturing())) {
            WrapStreamSafeCUDAAllocatorForDefault();
          }
          is_stream_safe_cuda_allocator_used_ = true;
        }

        InitNaiveBestFitCUDAPinnedAllocator();
#endif
#ifdef PADDLE_WITH_ASCEND_CL
        for (int dev_id = 0; dev_id < platform::GetNPUDeviceCount(); ++dev_id) {
          InitNaiveBestFitNPUAllocator(platform::NPUPlace(dev_id));
        }
        InitNaiveBestFitNPUPinnedAllocator();
#endif
#ifdef PADDLE_WITH_XPU
        for (int dev_id = 0; dev_id < platform::GetXPUDeviceCount(); ++dev_id) {
          InitNaiveBestFitXPUAllocator(platform::XPUPlace(dev_id));
        }
#endif
#ifdef PADDLE_WITH_IPU
        for (int dev_id = 0; dev_id < platform::GetIPUDeviceCount(); ++dev_id) {
          InitNaiveBestFitIPUAllocator(platform::IPUPlace(dev_id));
        }
#endif
#ifdef PADDLE_WITH_MLU
        for (int dev_id = 0; dev_id < platform::GetMLUDeviceCount(); ++dev_id) {
          InitNaiveBestFitMLUAllocator(platform::MLUPlace(dev_id));
        }
#endif
#ifdef PADDLE_WITH_CUSTOM_DEVICE
        auto device_types = phi::DeviceManager::GetAllCustomDeviceTypes();
        for (const auto& dev_type : device_types) {
          for (size_t dev_id = 0;
               dev_id < phi::DeviceManager::GetDeviceCount(dev_type);
               ++dev_id) {
            InitAutoGrowthCustomDeviceAllocator(
                platform::CustomPlace(dev_type, dev_id), allow_free_idle_chunk);
          }
        }
#endif
        break;
      }

      case AllocatorStrategy::kThreadLocal: {
        InitNaiveBestFitCPUAllocator();
#ifdef PADDLE_WITH_XPU
        for (int dev_id = 0; dev_id < platform::GetXPUDeviceCount(); ++dev_id) {
          InitNaiveBestFitXPUAllocator(platform::XPUPlace(dev_id));
        }
#endif
#ifdef PADDLE_WITH_IPU
        for (int dev_id = 0; dev_id < platform::GetIPUDeviceCount(); ++dev_id) {
          InitNaiveBestFitIPUAllocator(platform::IPUPlace(dev_id));
        }
#endif
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
        for (int dev_id = 0; dev_id < platform::GetGPUDeviceCount(); ++dev_id) {
          InitThreadLocalCUDAAllocator(platform::CUDAPlace(dev_id));
        }
        InitNaiveBestFitCUDAPinnedAllocator();
#endif
#ifdef PADDLE_WITH_MLU
        for (int dev_id = 0; dev_id < platform::GetMLUDeviceCount(); ++dev_id) {
          InitNaiveBestFitMLUAllocator(platform::MLUPlace(dev_id));
        }
#endif
        break;
      }

      default: {
        PADDLE_THROW(platform::errors::InvalidArgument(
            "Unsupported allocator strategy: %d", static_cast<int>(strategy_)));
      }
    }
    InitZeroSizeAllocators();
    InitSystemAllocators();

    if (FLAGS_gpu_allocator_retry_time > 0) {
      WrapCUDARetryAllocator(FLAGS_gpu_allocator_retry_time);
    }

    WrapStatAllocator();

    CheckAllocThreadSafe();

#ifdef PADDLE_WITH_CUDA
    // No need to wrap CUDAGraphAllocator for StreamSafeCUDAAllocator
    if (!is_stream_safe_cuda_allocator_used_ &&
        UNLIKELY(IsCUDAGraphCapturing())) {
      WrapCUDAGraphAllocator();
    }
#endif
  }

  inline const std::shared_ptr<Allocator>& GetAllocator(
      const platform::Place& place, size_t size) {
    VLOG(6) << "GetAllocator"
            << " " << place << " " << size;
    const auto& allocators =
        (size > 0 ? (UNLIKELY(FLAGS_use_system_allocator) ? system_allocators_
                                                          : GetAllocatorMap())
                  : zero_size_allocators_);
    auto iter = allocators.find(place);
    PADDLE_ENFORCE_NE(iter, allocators.end(),
                      platform::errors::NotFound(
                          "No allocator found for the place, %s", place));
    return iter->second;
  }

  void* GetBasePtr(const std::shared_ptr<phi::Allocation>& allocation) {
    return static_cast<Allocation*>(allocation.get())->base_ptr();
  }

  bool IsStreamSafeCUDAAllocatorUsed() {
    return is_stream_safe_cuda_allocator_used_ &&
           LIKELY(FLAGS_use_system_allocator == false);
  }

#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
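  // Whether a dedicated allocator has already been created for this
  // (place, stream) pair in cuda_allocators_.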
  bool HasCUDAAllocator(const platform::CUDAPlace& place, gpuStream_t stream) {
    auto it = cuda_allocators_.find(place);
    if (it == cuda_allocators_.end()) {
      return false;
    }
    const std::map<gpuStream_t, std::shared_ptr<Allocator>>& allocator_map =
        it->second;
    return allocator_map.find(stream) != allocator_map.end();
  }

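  // Double-checked lookup: first try the (place, stream) map under a shared
  // (reader) lock; if absent and create_if_not_found is true, create the
  // allocator under an exclusive lock.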
  const std::shared_ptr<Allocator>& GetAllocator(
      const platform::CUDAPlace& place, gpuStream_t stream,
      bool create_if_not_found = false) {
    if (LIKELY(!IsCUDAGraphCapturing())) {
      if (stream == GetDefaultStream(place)) {
        VLOG(7) << "Get Allocator by passing in a default stream";
        return GetAllocator(place, /* A non-zero num to choose allocator_ */ 1);
      }
    }

    /* shared_lock_guard */ {
      std::shared_lock<std::shared_timed_mutex> lock_guard(
          cuda_allocator_mutex_);
      if (LIKELY(HasCUDAAllocator(place, stream))) {
        return cuda_allocators_[place][stream];
      } else {
        PADDLE_ENFORCE_NE(create_if_not_found, false,
                          platform::errors::NotFound(
                              "No allocator found for stream %s in place %s "
                              "with create_if_not_found = false",
                              stream, place));
      }
    }

    /* unique_lock_guard */ {
      std::unique_lock<std::shared_timed_mutex> lock_guard(
          cuda_allocator_mutex_);
      InitStreamSafeCUDAAllocator(place, stream);
      return cuda_allocators_[place][stream];
    }
  }

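  // Returns the allocator serving the default stream of the given place, as
  // registered by WrapStreamSafeCUDAAllocatorForDefault().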
  const std::shared_ptr<StreamSafeCUDAAllocator>
  GetDefaultStreamSafeCUDAAllocator(const platform::CUDAPlace& place) const {
    const auto iter = default_stream_safe_cuda_allocators_.find(place);
    PADDLE_ENFORCE_NE(
        iter, default_stream_safe_cuda_allocators_.end(),
        platform::errors::NotFound(
            "No StreamSafeCUDAAllocator found for the place, %s", place));
    return iter->second;
  }

  gpuStream_t GetDefaultStream(const platform::CUDAPlace& place) const {
    const std::shared_ptr<StreamSafeCUDAAllocator>& allocator =
        GetDefaultStreamSafeCUDAAllocator(place);
    return allocator->GetDefaultStream();
  }

  void SetDefaultStream(const platform::CUDAPlace& place, gpuStream_t stream) {
    const std::shared_ptr<StreamSafeCUDAAllocator>& allocator =
        GetDefaultStreamSafeCUDAAllocator(place);

    PADDLE_ENFORCE_EQ(
        allocator->GetDefaultStream(), nullptr,
        platform::errors::Unavailable(
            "The default stream for StreamSafeCUDAAllocator(%p) in %s has "
            "been set to %p; it is not allowed to change it to %p.",
            allocator.get(), place, allocator->GetDefaultStream(), stream));

    allocator->SetDefaultStream(stream);
    VLOG(8) << "Set default stream to " << stream
            << " for StreamSafeCUDAAllocator(" << allocator.get() << ") in "
            << place;
  }

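  // Marks the allocation as also used on the given stream, so it is not
  // recycled until work on all recorded streams has finished; a no-op for
  // allocations that are not StreamSafeCUDAAllocation.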
  void RecordStream(std::shared_ptr<phi::Allocation> allocation,
                    gpuStream_t stream) {
    std::shared_ptr<StreamSafeCUDAAllocation> stream_safe_cuda_allocation =
        std::dynamic_pointer_cast<StreamSafeCUDAAllocation>(allocation);
    if (stream_safe_cuda_allocation != nullptr) {
      stream_safe_cuda_allocation->RecordStream(stream);
    } else {
      VLOG(6) << "RecordStream for a non-StreamSafeCUDAAllocation";
    }
  }

  gpuStream_t GetStream(
      const std::shared_ptr<phi::Allocation>& allocation) const {
    const std::shared_ptr<StreamSafeCUDAAllocation>
        stream_safe_cuda_allocation =
            std::dynamic_pointer_cast<StreamSafeCUDAAllocation>(allocation);
    if (stream_safe_cuda_allocation != nullptr) {
      return stream_safe_cuda_allocation->GetOwningStream();
    }

    VLOG(6) << "GetStream for a non-StreamSafeCUDAAllocation";
    return static_cast<phi::GPUContext*>(
               platform::DeviceContextPool::Instance().Get(allocation->place()))
        ->stream();
  }
#endif

 private:
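  // Serves size == 0 requests with a dummy allocation wrapping a null
  // pointer, so zero-size Alloc calls need no special-casing elsewhere.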
  class ZeroSizeAllocator : public Allocator {
   public:
    explicit ZeroSizeAllocator(platform::Place place) : place_(place) {}
    bool IsAllocThreadSafe() const override { return true; }

   protected:
    phi::Allocation* AllocateImpl(size_t size) override {
      return new Allocation(nullptr, 0, place_);
    }
    void FreeImpl(phi::Allocation* allocation) override { delete allocation; }

   private:
    platform::Place place_;
  };

  const AllocatorMap& GetAllocatorMap() { return allocators_; }

  void InitNaiveBestFitCPUAllocator() {
    allocators_[platform::CPUPlace()] =
        std::make_shared<NaiveBestFitAllocator>(platform::CPUPlace());
  }

#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
  void InitNaiveBestFitCUDAPinnedAllocator() {
    allocators_[platform::CUDAPinnedPlace()] =
        std::make_shared<NaiveBestFitAllocator>(platform::CUDAPinnedPlace());
  }

  void InitNaiveBestFitCUDAAllocator(platform::CUDAPlace p) {
    allocators_[p] = std::make_shared<NaiveBestFitAllocator>(p);
  }

  // Create a new CUDAAllocator or CUDAManagedAllocator for the given device
  std::shared_ptr<Allocator> CreateCUDAAllocator(platform::CUDAPlace p) {
    if (FLAGS_use_cuda_managed_memory) {
      PADDLE_ENFORCE_EQ(
          strategy_, AllocatorStrategy::kAutoGrowth,
          platform::errors::InvalidArgument(
              "CUDA managed memory is only implemented for auto_growth "
              "strategy, not support %s strategy.\n"
              "Please use auto_growth strategy by command `export "
              "FLAGS_allocator_strategy=\"auto_growth\"`, or disable managed "
              "memory by command `export FLAGS_use_cuda_managed_memory=false`",
              FLAGS_allocator_strategy));

      if (!platform::IsGPUManagedMemorySupported(p.device)) {
        PADDLE_THROW(platform::errors::Unavailable(
            "Failed to create CUDAManagedAllocator on GPU %d.\n\n"
            "You have enabled CUDA managed memory, but the gpu device does not "
            "support allocating managed memory.\n"
            "If you don't actually need to use managed memory, please disable "
            "it with command `export FLAGS_use_cuda_managed_memory=false`.\n"
            "Or you must use the gpu device that supports managed memory.",
            p.device));
      }
      return std::make_shared<CUDAManagedAllocator>(p);
    }
    return std::make_shared<CUDAAllocator>(p);
  }

  void InitStreamSafeCUDAAllocator(platform::CUDAPlace p, gpuStream_t stream) {
    PADDLE_ENFORCE_EQ(
        strategy_, AllocatorStrategy::kAutoGrowth,
        platform::errors::Unimplemented(
            "Only support auto-growth strategey for StreamSafeCUDAAllocator, "
            "the allocator strategy %d is unsupported for multi-stream",
            static_cast<int>(strategy_)));
    if (LIKELY(!HasCUDAAllocator(p, stream))) {
      VLOG(8) << "Init CUDA allocator for stream " << stream << " in place "
              << p;
      InitAutoGrowthCUDAAllocator(p, stream);
      WrapStreamSafeCUDAAllocator(p, stream);
      WrapCUDARetryAllocator(p, stream, FLAGS_gpu_allocator_retry_time);
      WrapStatAllocator(p, stream);
    }
  }

  void InitAutoGrowthCUDAAllocator(platform::CUDAPlace p, gpuStream_t stream) {
#if defined(PADDLE_WITH_HIP)
    auto cuda_allocator = CreateCUDAAllocator(p);
    cuda_allocators_[p][stream] = std::make_shared<AutoGrowthBestFitAllocator>(
        cuda_allocator, platform::GpuMinChunkSize(), 0, allow_free_idle_chunk_);
#endif

#if defined(PADDLE_WITH_CUDA)
#if CUDA_VERSION >= 10020
    CUdevice device;
    int val;
    try {
      PADDLE_ENFORCE_GPU_SUCCESS(
          paddle::platform::dynload::cuDeviceGet(&device, p.GetDeviceId()));

      PADDLE_ENFORCE_GPU_SUCCESS(
          paddle::platform::dynload::cuDeviceGetAttribute(
              &val, CU_DEVICE_ATTRIBUTE_VIRTUAL_ADDRESS_MANAGEMENT_SUPPORTED,
              device));
    } catch (...) {
      val = 0;
    }

    if (val > 0 && FLAGS_use_virtual_memory_auto_growth) {
      auto cuda_allocator = std::make_shared<CUDAVirtualMemAllocator>(p);
      cuda_allocators_[p][stream] =
          std::make_shared<VirtualMemoryAutoGrowthBestFitAllocator>(
              cuda_allocator, platform::GpuMinChunkSize(), p);
    } else {
      auto cuda_allocator = CreateCUDAAllocator(p);
      cuda_allocators_[p][stream] =
          std::make_shared<AutoGrowthBestFitAllocator>(
              cuda_allocator, platform::GpuMinChunkSize(),
              allow_free_idle_chunk_);
    }
#else
    auto cuda_allocator = CreateCUDAAllocator(p);
    auto alignment = platform::GpuMinChunkSize();
    bool need_addr_align = true;
    // NOTE: sometimes, since the CUDA runtime cannot be forked, calling any
    // CUDA API in a forked process may return cuda error(3), i.e.,
    // cudaErrorInitializationError. In that case, the CUDAAllocator is only
    // initialized but never really used.
    // The try-catch block here handles the case where GetDeviceProperties()
    // may fail in multi-process settings (for example, in a dataloader with
    // num_worker > 0).
    try {
      const auto& prop = platform::GetDeviceProperties(p.GetDeviceId());
      need_addr_align = prop.textureAlignment < alignment;
      VLOG(4) << "GetDeviceProperties ok, textureAlignment: "
              << prop.textureAlignment
              << ", set need_addr_align=" << need_addr_align;
    } catch (...) {
      need_addr_align = true;
      VLOG(4) << "GetDeviceProperties failed, set need_addr_align=true";
    }
    // The address returned is aligned already,
    // ref:
    // https://stackoverflow.com/questions/14082964/cuda-alignment-256bytes-seriously/14083295#14083295
    std::shared_ptr<Allocator> underlying_allocator{nullptr};
    if (need_addr_align) {
      VLOG(10) << "use AlignedAllocator with alignment: " << alignment;
      underlying_allocator =
          std::make_shared<AlignedAllocator>(cuda_allocator, alignment);
    } else {
      VLOG(10) << "not use AlignedAllocator with alignment: " << alignment;
      underlying_allocator = cuda_allocator;
    }

    cuda_allocators_[p][stream] = std::make_shared<AutoGrowthBestFitAllocator>(
        underlying_allocator, alignment, 0, allow_free_idle_chunk_);
#endif
#endif
  }

  // NOTE(Ruibiao): Old single-stream version, will be removed later
  void InitAutoGrowthCUDAAllocator(platform::CUDAPlace p,
                                   bool allow_free_idle_chunk) {
#if defined(PADDLE_WITH_HIP)
    auto cuda_allocator = CreateCUDAAllocator(p);
    allocators_[p] = std::make_shared<AutoGrowthBestFitAllocator>(
        cuda_allocator, platform::GpuMinChunkSize(), allow_free_idle_chunk);
#endif

#if defined(PADDLE_WITH_CUDA)
#if CUDA_VERSION >= 10020
    CUdevice device;
    int val;
    try {
      PADDLE_ENFORCE_GPU_SUCCESS(
          paddle::platform::dynload::cuDeviceGet(&device, p.GetDeviceId()));

      PADDLE_ENFORCE_GPU_SUCCESS(
          paddle::platform::dynload::cuDeviceGetAttribute(
              &val, CU_DEVICE_ATTRIBUTE_VIRTUAL_ADDRESS_MANAGEMENT_SUPPORTED,
              device));
    } catch (...) {
      val = 0;
    }

    if (val > 0 && FLAGS_use_virtual_memory_auto_growth) {
      auto cuda_allocator = std::make_shared<CUDAVirtualMemAllocator>(p);
      allocators_[p] =
          std::make_shared<VirtualMemoryAutoGrowthBestFitAllocator>(
              cuda_allocator, platform::GpuMinChunkSize(), p);
    } else {
      auto cuda_allocator = CreateCUDAAllocator(p);
      allocators_[p] = std::make_shared<AutoGrowthBestFitAllocator>(
          cuda_allocator, platform::GpuMinChunkSize(), allow_free_idle_chunk);
    }

#else
    auto cuda_allocator = CreateCUDAAllocator(p);
    auto alignment = platform::GpuMinChunkSize();
    bool need_addr_align = true;
    // NOTE: sometimes, since the CUDA runtime cannot be forked, calling any
    // CUDA API in a forked process may return cuda error(3), i.e.,
    // cudaErrorInitializationError. In that case, the CUDAAllocator is only
    // initialized but never really used.
    // The try-catch block here handles the case where GetDeviceProperties()
    // may fail in multi-process settings (for example, in a dataloader with
    // num_worker > 0).
    try {
      const auto& prop = platform::GetDeviceProperties(p.GetDeviceId());
      need_addr_align = prop.textureAlignment < alignment;
      VLOG(4) << "GetDeviceProperties ok, textureAlignment: "
              << prop.textureAlignment
              << ", set need_addr_align=" << need_addr_align;
    } catch (...) {
      need_addr_align = true;
      VLOG(4) << "GetDeviceProperties failed, set need_addr_align=true";
    }
    // The address returned is aligned already,
    // ref:
    // https://stackoverflow.com/questions/14082964/cuda-alignment-256bytes-seriously/14083295#14083295
    std::shared_ptr<Allocator> underlying_allocator{nullptr};
    if (need_addr_align) {
      VLOG(10) << "use AlignedAllocator with alignment: " << alignment;
      underlying_allocator =
          std::make_shared<AlignedAllocator>(cuda_allocator, alignment);
    } else {
      VLOG(10) << "not use AlignedAllocator with alignment: " << alignment;
      underlying_allocator = cuda_allocator;
    }
    allocators_[p] = std::make_shared<AutoGrowthBestFitAllocator>(
        underlying_allocator, alignment, 0, allow_free_idle_chunk);
#endif
#endif
  }

  void InitThreadLocalCUDAAllocator(platform::CUDAPlace p) {
    allocators_[p] = std::make_shared<ThreadLocalCUDAAllocator>(p);
  }

  void WrapStreamSafeCUDAAllocator(platform::CUDAPlace p, gpuStream_t stream) {
    std::shared_ptr<Allocator>& allocator = cuda_allocators_[p][stream];
    allocator = std::make_shared<StreamSafeCUDAAllocator>(
        allocator, p, stream,
        /* in_cuda_graph_capturing = */ !allow_free_idle_chunk_);
  }

  void WrapStreamSafeCUDAAllocatorForDefault() {
    for (auto& pair : allocators_) {
      auto& place = pair.first;
      if (platform::is_gpu_place(place)) {
        std::shared_ptr<StreamSafeCUDAAllocator>&& allocator =
            std::make_shared<StreamSafeCUDAAllocator>(
                pair.second, place,
                /* default_stream = */ nullptr,
                /* in_cuda_graph_capturing = */ !allow_free_idle_chunk_);
        pair.second = allocator;

        // NOTE(Ruibiao): A tricky implementation that gives
        // StreamSafeCUDAAllocator the ability to interact with the outside
        // world, i.e., to have its default stream changed from outside
        default_stream_safe_cuda_allocators_[place] = allocator;
        VLOG(8) << "WrapStreamSafeCUDAAllocator for " << place
                << ", allocator address = " << pair.second.get();
      }
    }
  }

  void WrapCUDARetryAllocator(platform::CUDAPlace p, gpuStream_t stream,
                              size_t retry_time) {
    PADDLE_ENFORCE_GT(
        retry_time, 0,
        platform::errors::InvalidArgument(
            "Retry time should be larger than 0, but got %d", retry_time));
    std::shared_ptr<Allocator>& allocator = cuda_allocators_[p][stream];
    allocator = std::make_shared<RetryAllocator>(allocator, retry_time);
  }

  void WrapStatAllocator(platform::CUDAPlace p, gpuStream_t stream) {
    std::shared_ptr<Allocator>& allocator = cuda_allocators_[p][stream];
    allocator = std::make_shared<StatAllocator>(allocator);
  }

#ifdef PADDLE_WITH_CUDA
  void WrapCUDAGraphAllocator() {
    for (auto& item : allocators_) {
      auto& allocator = item.second;
      allocator = CUDAGraphAllocator::Create(allocator);
    }
  }
#endif

  static void CheckCUDAAllocThreadSafe(const CUDAAllocatorMap& allocators) {
    for (auto& place_pair : allocators) {
      for (auto& stream_pair : place_pair.second) {
        PADDLE_ENFORCE_EQ(stream_pair.second->IsAllocThreadSafe(), true,
                          platform::errors::InvalidArgument(
                              "Public allocators must be thread safe"));
      }
    }
  }
#endif

#ifdef PADDLE_WITH_XPU
  void InitNaiveBestFitXPUAllocator(platform::XPUPlace p) {
    allocators_[p] = std::make_shared<NaiveBestFitAllocator>(p);
  }
#endif

#ifdef PADDLE_WITH_IPU
  void InitNaiveBestFitIPUAllocator(platform::IPUPlace p) {
    allocators_[p] = std::make_shared<NaiveBestFitAllocator>(p);
  }
#endif

#ifdef PADDLE_WITH_MLU
  void InitNaiveBestFitMLUAllocator(platform::MLUPlace p) {
    allocators_[p] = std::make_shared<NaiveBestFitAllocator>(p);
  }
#endif

#ifdef PADDLE_WITH_ASCEND_CL
  void InitNaiveBestFitNPUAllocator(platform::NPUPlace p) {
    allocators_[p] = std::make_shared<NaiveBestFitAllocator>(p);
  }

  void InitNaiveBestFitNPUPinnedAllocator() {
    allocators_[platform::NPUPinnedPlace()] =
        std::make_shared<paddle::memory::allocation::NPUPinnedAllocator>();
  }
#endif

#ifdef PADDLE_WITH_CUSTOM_DEVICE
  void InitNaiveBestFitCustomDeviceAllocator(platform::CustomPlace p) {
    allocators_[p] = std::make_shared<NaiveBestFitAllocator>(p);
  }

  void InitAutoGrowthCustomDeviceAllocator(platform::CustomPlace p,
                                           bool allow_free_idle_chunk) {
    auto custom_allocator =
        std::make_shared<paddle::memory::allocation::CustomAllocator>(p);
    allocators_[p] = std::make_shared<AutoGrowthBestFitAllocator>(
        custom_allocator, phi::DeviceManager::GetMinChunkSize(p),
        allow_free_idle_chunk);
  }
#endif

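  // System allocators bypass the configured strategy; GetAllocator selects
  // them when FLAGS_use_system_allocator is set (unittests only).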
  void InitSystemAllocators() {
    if (!system_allocators_.empty()) return;
    system_allocators_[platform::CPUPlace()] = std::make_shared<CPUAllocator>();
#ifdef PADDLE_WITH_XPU
    int device_count = platform::GetXPUDeviceCount();
    for (int i = 0; i < device_count; ++i) {
      platform::XPUPlace p(i);
      system_allocators_[p] = std::make_shared<NaiveBestFitAllocator>(p);
    }
#endif
#ifdef PADDLE_WITH_IPU
    int device_count = platform::GetIPUDeviceCount();
    for (int i = 0; i < device_count; ++i) {
      platform::IPUPlace p(i);
      system_allocators_[p] = std::make_shared<NaiveBestFitAllocator>(p);
    }
#endif
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
    system_allocators_[platform::CUDAPinnedPlace()] =
        std::make_shared<CPUPinnedAllocator>();
    int device_count = platform::GetGPUDeviceCount();
    for (int i = 0; i < device_count; ++i) {
      platform::CUDAPlace p(i);
      system_allocators_[p] = CreateCUDAAllocator(p);
    }
#endif
#ifdef PADDLE_WITH_MLU
    int device_count = platform::GetMLUDeviceCount();
    for (int i = 0; i < device_count; ++i) {
      platform::MLUPlace p(i);
      system_allocators_[p] = std::make_shared<NaiveBestFitAllocator>(p);
    }
#endif
#ifdef PADDLE_WITH_CUSTOM_DEVICE
    auto device_types = phi::DeviceManager::GetAllCustomDeviceTypes();
    for (const auto& dev_type : device_types) {
      for (size_t dev_id = 0;
           dev_id < phi::DeviceManager::GetDeviceCount(dev_type); dev_id++) {
        platform::CustomPlace p(dev_type, dev_id);
        system_allocators_[p] = std::make_shared<NaiveBestFitAllocator>(p);
      }
    }
#endif
  }

  void InitZeroSizeAllocators() {
    if (!zero_size_allocators_.empty()) return;
    std::vector<platform::Place> places;
    places.emplace_back(platform::CPUPlace());
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
    int device_count = platform::GetGPUDeviceCount();
    for (int dev_id = 0; dev_id < device_count; ++dev_id) {
      places.emplace_back(platform::CUDAPlace(dev_id));
    }
    places.emplace_back(platform::CUDAPinnedPlace());
#endif
#ifdef PADDLE_WITH_XPU
    int device_count = platform::GetXPUDeviceCount();
    for (int dev_id = 0; dev_id < device_count; ++dev_id) {
      places.emplace_back(platform::XPUPlace(dev_id));
    }
#endif
#ifdef PADDLE_WITH_ASCEND_CL
    int device_count = platform::GetNPUDeviceCount();
    for (int dev_id = 0; dev_id < device_count; ++dev_id) {
      places.emplace_back(platform::NPUPlace(dev_id));
    }
#endif
#ifdef PADDLE_WITH_IPU
    int device_count = platform::GetIPUDeviceCount();
    for (int dev_id = 0; dev_id < device_count; ++dev_id) {
      places.emplace_back(platform::IPUPlace(dev_id));
    }
#endif
#ifdef PADDLE_WITH_MLU
    int device_count = platform::GetMLUDeviceCount();
    for (int dev_id = 0; dev_id < device_count; ++dev_id) {
      places.emplace_back(platform::MLUPlace(dev_id));
    }
#endif
#ifdef PADDLE_WITH_CUSTOM_DEVICE
    auto device_types = phi::DeviceManager::GetAllCustomDeviceTypes();
    for (const auto& dev_type : device_types) {
      for (size_t dev_id = 0;
           dev_id < phi::DeviceManager::GetDeviceCount(dev_type); dev_id++) {
        places.emplace_back(platform::CustomPlace(dev_type, dev_id));
      }
    }
#endif

    for (auto& p : places) {
      zero_size_allocators_[p] = std::make_shared<ZeroSizeAllocator>(p);
    }
  }

  static void CheckAllocThreadSafe(const AllocatorMap& allocators) {
    for (auto& pair : allocators) {
      PADDLE_ENFORCE_EQ(pair.second->IsAllocThreadSafe(), true,
                        platform::errors::InvalidArgument(
                            "Public allocators must be thread safe"));
    }
  }

  void CheckAllocThreadSafe() const {
    CheckAllocThreadSafe(allocators_);
    CheckAllocThreadSafe(zero_size_allocators_);
    CheckAllocThreadSafe(system_allocators_);
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
    if (is_stream_safe_cuda_allocator_used_) {
      CheckCUDAAllocThreadSafe(cuda_allocators_);
    }
#endif
  }

  void WrapCUDARetryAllocator(size_t retry_time) {
    PADDLE_ENFORCE_GT(
        retry_time, 0,
        platform::errors::InvalidArgument(
            "Retry time should be larger than 0, but got %d", retry_time));
    for (auto& pair : allocators_) {
      if (platform::is_gpu_place(pair.first)) {
        pair.second = std::make_shared<RetryAllocator>(pair.second, retry_time);
      }
    }
  }

  void WrapStatAllocator() {
    for (auto& pair : allocators_) {
      // Currently, memory stats are only supported for CPU and GPU
      const platform::Place& place = pair.first;
      if (platform::is_cpu_place(place) ||
          platform::is_cuda_pinned_place(place) ||
          platform::is_gpu_place(place)) {
        pair.second = std::make_shared<StatAllocator>(pair.second);
      }
    }
  }

#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
  // a standalone CUDA allocator to support multi-stream GC in new executor
  std::map<platform::Place, std::shared_ptr<StreamSafeCUDAAllocator>>
      default_stream_safe_cuda_allocators_;
  CUDAAllocatorMap cuda_allocators_;
  std::shared_timed_mutex cuda_allocator_mutex_;
#endif
  AllocatorStrategy strategy_;
  AllocatorMap allocators_;
  static AllocatorMap zero_size_allocators_;
  static AllocatorMap system_allocators_;
  bool allow_free_idle_chunk_;
  bool is_stream_safe_cuda_allocator_used_;
};
AllocatorFacadePrivate::AllocatorMap
    AllocatorFacadePrivate::zero_size_allocators_;
AllocatorFacadePrivate::AllocatorMap AllocatorFacadePrivate::system_allocators_;

// Pimpl. Make interface clean.
AllocatorFacade::AllocatorFacade() : m_(new AllocatorFacadePrivate()) {}
// Deleting m_ may cause a core dump when the Python destructor conflicts
// with the C++ one.
AllocatorFacade::~AllocatorFacade() {}

AllocatorFacade& AllocatorFacade::Instance() {
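  // Intentionally leaked so the allocators outlive all other statics; see
  // the note on ~AllocatorFacade() above.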
  static AllocatorFacade* instance = new AllocatorFacade;
  return *instance;
}

AllocatorFacadePrivate* AllocatorFacade::GetPrivate() const {
#ifdef PADDLE_WITH_CUDA
  if (UNLIKELY(IsCUDAGraphCapturing())) {
    auto id = platform::CUDAGraph::CapturingPoolID();
    auto iter = cuda_graph_map_.find(id);
    PADDLE_ENFORCE_NE(
        iter, cuda_graph_map_.end(),
        platform::errors::PermissionDenied(
            "No memory pool is prepared for CUDA Graph capturing."));
    VLOG(10) << "Choose CUDA Graph memory pool";
    return iter->second.get();
  }
#endif
  return m_;
}

const std::shared_ptr<Allocator>& AllocatorFacade::GetAllocator(
    const platform::Place& place) {
  return GetPrivate()->GetAllocator(
      place, /* A non-zero num to choose allocator_ */ 1);
}

void* AllocatorFacade::GetBasePtr(
    const std::shared_ptr<phi::Allocation>& allocation) {
  PADDLE_ENFORCE_EQ(GetAllocatorStrategy(), AllocatorStrategy::kAutoGrowth,
                    paddle::platform::errors::Unimplemented(
                        "GetBasePtr() is only implemented for auto_growth "
                        "strategy, not support allocator strategy: %d",
                        static_cast<int>(GetAllocatorStrategy())));
  PADDLE_ENFORCE_EQ(platform::is_gpu_place(allocation->place()), true,
                    paddle::platform::errors::Unimplemented(
                        "GetBasePtr() is only implemented for CUDAPlace(), not "
                        "suppot place: %s",
                        allocation->place()));
  return GetPrivate()->GetBasePtr(allocation);
}

const std::shared_ptr<Allocator>& AllocatorFacade::GetZeroAllocator(
    const platform::Place& place) {
  return GetPrivate()->GetAllocator(place, /* zero size */ 0);
}

std::shared_ptr<phi::Allocation> AllocatorFacade::AllocShared(
    const platform::Place& place, size_t size) {
  return std::shared_ptr<phi::Allocation>(Alloc(place, size));
}

AllocationPtr AllocatorFacade::Alloc(const platform::Place& place,
                                     size_t size) {
  return GetPrivate()->GetAllocator(place, size)->Allocate(size);
}

uint64_t AllocatorFacade::Release(const platform::Place& place) {
  return GetPrivate()
      ->GetAllocator(place, /* A non-zero num to choose allocator_ */ 1)
      ->Release(place);
}

std::shared_ptr<phi::Allocation> AllocatorFacade::AllocShared(
    const platform::Place& place, size_t size, const phi::Stream& stream) {
  return std::shared_ptr<phi::Allocation>(Alloc(place, size, stream));
}

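// Stream-aware allocation: when StreamSafeCUDAAllocator is in use, the
// returned memory is bound to the given stream and is recycled only after
// the work recorded on it finishes; otherwise this falls back to the
// stream-agnostic Alloc above.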
AllocationPtr AllocatorFacade::Alloc(const platform::Place& place, size_t size,
                                     const phi::Stream& stream) {
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
  AllocatorFacadePrivate* m = GetPrivate();
  if (!m->IsStreamSafeCUDAAllocatorUsed()) {
    VLOG(6) << "Warning: StreamSafeCUDAAllocator is not used!";
    return Alloc(place, size);
  }

  platform::CUDAPlace p(place.GetDeviceId());
  if (LIKELY(size > 0 && FLAGS_use_system_allocator == false)) {
    gpuStream_t s = reinterpret_cast<gpuStream_t>(stream.id());
    return m->GetAllocator(p, s, /* create_if_not_found = */ true)
        ->Allocate(size);
  } else {
    return m->GetAllocator(p, size)->Allocate(size);
  }
#elif defined PADDLE_WITH_XPU
  return GetAllocator(place)->Allocate(size);
#else
  PADDLE_THROW(
      platform::errors::PreconditionNotMet("Not compiled with GPU or XPU."));
#endif
}

bool AllocatorFacade::InSameStream(
    const std::shared_ptr<phi::Allocation>& allocation,
    const phi::Stream& stream) {
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
  gpuStream_t s = reinterpret_cast<gpuStream_t>(stream.id());
  return s == GetStream(allocation);
#else
  PADDLE_THROW(platform::errors::PreconditionNotMet("Not compiled with GPU."));
#endif
}

bool AllocatorFacade::IsStreamSafeCUDAAllocatorUsed() {
  return GetPrivate()->IsStreamSafeCUDAAllocatorUsed();
}

#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
uint64_t AllocatorFacade::Release(const platform::CUDAPlace& place,
                                  gpuStream_t stream) {
  AllocatorFacadePrivate* m = GetPrivate();
  if (!m->IsStreamSafeCUDAAllocatorUsed()) {
    VLOG(6) << "Warning: StreamSafeCUDAAllocator is not used!";
    return Release(place);
  }

  return m->GetAllocator(place, stream)->Release(place);
}

void AllocatorFacade::RecordStream(std::shared_ptr<phi::Allocation> allocation,
                                   gpuStream_t stream) {
  GetPrivate()->RecordStream(allocation, stream);
}

const std::shared_ptr<Allocator>& AllocatorFacade::GetAllocator(
    const platform::Place& place, gpuStream_t stream) {
  AllocatorFacadePrivate* m = GetPrivate();

  if (!m->IsStreamSafeCUDAAllocatorUsed()) {
    VLOG(6) << "Warning: StreamSafeCUDAAllocator is not used!";
    return GetAllocator(place);
  }

  if (platform::is_gpu_place(place) && FLAGS_use_system_allocator == false) {
    return m->GetAllocator(place, stream,
                           /*create_if_not_found=*/true);
  }
  return m->GetAllocator(place, /* A non-zero num to choose allocator_ */ 1);
}

gpuStream_t AllocatorFacade::GetStream(
    const std::shared_ptr<phi::Allocation>& allocation) const {
  return GetPrivate()->GetStream(allocation);
}

void AllocatorFacade::SetDefaultStream(const platform::CUDAPlace& place,
                                       gpuStream_t stream) {
  if (m_->IsStreamSafeCUDAAllocatorUsed()) {
    m_->SetDefaultStream(place, stream);
  }
}

#ifdef PADDLE_WITH_CUDA
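// Each capturing pool ID owns a standalone AllocatorFacadePrivate memory
// pool; a reference count tracks how many captures share it.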
void AllocatorFacade::PrepareMemoryPoolForCUDAGraph(int64_t id) {
  PADDLE_ENFORCE_EQ(GetAllocatorStrategy(), AllocatorStrategy::kAutoGrowth,
                    platform::errors::InvalidArgument(
                        "CUDA Graph is only supported when the "
                        "FLAGS_allocator_strategy=\"auto_growth\", but got "
                        "FLAGS_allocator_strategy=\"%s\"",
                        FLAGS_allocator_strategy));
  auto& allocator = cuda_graph_map_[id];
  auto& ref_cnt = cuda_graph_ref_cnt_[id];
  if (allocator.get() == nullptr) {
    allocator.reset(
        new AllocatorFacadePrivate(/*allow_free_idle_chunk=*/false));
    VLOG(10) << "Create memory pool for CUDA Graph with memory ID " << id;
  } else {
    VLOG(10) << "Use created memory pool for CUDA Graph with memory ID " << id;
  }
  ++ref_cnt;
}

void AllocatorFacade::RemoveMemoryPoolOfCUDAGraph(int64_t id) {
  auto ref_cnt_iter = cuda_graph_ref_cnt_.find(id);
  PADDLE_ENFORCE_NE(ref_cnt_iter, cuda_graph_ref_cnt_.end(),
                    platform::errors::InvalidArgument(
                        "Cannot find CUDA Graph with memory ID = %d", id));
  auto& ref_cnt = ref_cnt_iter->second;
  --ref_cnt;
  if (ref_cnt == 0) {
    cuda_graph_map_.erase(id);
    cuda_graph_ref_cnt_.erase(ref_cnt_iter);
    VLOG(10) << "Remove memory pool of CUDA Graph with memory ID " << id;
  } else {
    VLOG(10) << "Decrease memory pool ID " << id << " reference count to be "
             << ref_cnt;
  }
}
#endif
#endif
}  // namespace allocation
}  // namespace memory
}  // namespace paddle