// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/fluid/memory/allocation/thread_local_allocator.h"

namespace paddle {
namespace memory {
namespace allocation {

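// A ThreadLocalAllocatorImpl manages a per-thread BuddyAllocator backed by a
// raw GPUAllocator. Only GPU places are supported; constructing one for any
// other place throws.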
ThreadLocalAllocatorImpl::ThreadLocalAllocatorImpl(const platform::Place& p)
    : place_(p) {
  if (platform::is_gpu_place(place_)) {
    buddy_allocator_.reset(new memory::detail::BuddyAllocator(
        std::unique_ptr<memory::detail::SystemAllocator>(
            new memory::detail::GPUAllocator(place_.device)),
        platform::GpuMinChunkSize(), platform::GpuMaxChunkSize()));
  } else {
    PADDLE_THROW(platform::errors::Unavailable(
        "Thread local allocator only supports CUDAPlace now."));
  }
}

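// Look up the allocator for gpu_id among the selected devices, creating it
// lazily on first use. std::call_once ensures each device slot is initialized
// at most once, with the device made current before the allocator is built.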
std::shared_ptr<ThreadLocalAllocatorImpl> ThreadLocalCUDAAllocatorPool::Get(
    int gpu_id) {
  auto pos = std::distance(devices_.begin(),
                           std::find(devices_.begin(), devices_.end(), gpu_id));
  PADDLE_ENFORCE_LT(
      static_cast<size_t>(pos), devices_.size(),
      platform::errors::InvalidArgument(
          "The gpu_id %d is not in the list of selected devices.", gpu_id));
  std::call_once(*init_flags_[pos], [this, pos, gpu_id] {
    platform::SetDeviceId(devices_[pos]);
    allocators_[pos].reset(
        new ThreadLocalAllocatorImpl(platform::CUDAPlace(gpu_id)));
  });
  return allocators_[pos];
}

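// The pool holds one allocator slot (and one std::once_flag) per selected
// GPU device; the allocators themselves are created lazily in Get().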
ThreadLocalCUDAAllocatorPool::ThreadLocalCUDAAllocatorPool()
    : devices_(platform::GetSelectedDevices()) {
  auto gpu_num = devices_.size();
  allocators_.resize(gpu_num);
  init_flags_.reserve(gpu_num);
  for (size_t i = 0; i < gpu_num; ++i) {
    init_flags_.emplace_back(new std::once_flag());
  }
}

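// Carve `size` bytes out of the underlying buddy allocator and wrap the raw
// pointer in a ThreadLocalAllocation that remembers its owning allocator, so
// the allocation can be routed back here when it is freed.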
ThreadLocalAllocation* ThreadLocalAllocatorImpl::AllocateImpl(size_t size) {
  VLOG(10) << "ThreadLocalAllocatorImpl::AllocateImpl " << size;
  void* ptr = buddy_allocator_->Alloc(size);
  auto* tl_allocation = new ThreadLocalAllocation(ptr, size, place_);
  tl_allocation->SetThreadLocalAllocatorImpl(shared_from_this());
  return tl_allocation;
}

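// Return the allocation's memory to the buddy allocator and destroy the
// wrapper object itself.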
void ThreadLocalAllocatorImpl::FreeImpl(ThreadLocalAllocation* allocation) {
  VLOG(10) << "ThreadLocalAllocatorImpl::FreeImpl " << allocation;
  buddy_allocator_->Free(allocation->ptr());
  delete allocation;
}

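// Hand idle chunks cached by the buddy allocator back to the system and
// report the amount released.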
uint64_t ThreadLocalAllocatorImpl::ReleaseImpl() {
  return buddy_allocator_->Release();
}

}  // namespace allocation
}  // namespace memory
}  // namespace paddle