// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once
#include <algorithm>
#include <mutex>  // NOLINT
#include <unordered_map>
#include <utility>
#include <vector>
#include "paddle/fluid/memory/allocation/allocator.h"
#include "paddle/fluid/platform/place.h"
namespace paddle {
namespace memory {
namespace allocation {

class MemInfo {
 public:
  MemInfo() : usage_(0), peak_usage_(0) {}

  // Adds to the current usage; returns true if this operation creates a new peak.
  bool Add(const size_t &);
  void Minus(const size_t &);

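  // Returns the peak (high-water-mark) usage recorded so far, in bytes.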
  uint64_t GetPeakUsage() const;

 private:
  /* current memory usage */
  uint64_t usage_;
  uint64_t peak_usage_;
  std::mutex mutex_;

  DISABLE_COPY_AND_ASSIGN(MemInfo);
};

class LegacyMemMonitor {
 public:
  // Maps each GPU device id to its memory usage info.
  using MemUsage = std::unordered_map</*device id*/ int,
                                      /*mem usage info node*/ MemInfo *>;

  MemUsage GetMemUsageInfo() { return gpu_mem_info_; }
  ~LegacyMemMonitor();

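  // Initialize sets up a usage-info entry per GPU device (the argument is
  // assumed to be the device count); Add/Minus adjust a device's tracked
  // usage by the given number of bytes.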
  void Initialize(const int &);
  void Add(const int &, const size_t &);
  void Minus(const int &, const size_t &);

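  // Returns the tracked memory usage of the given device, in bytes.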
  uint64_t GetMemUsage(const int &) const;

  void PrintMemUsage();

 private:
  MemUsage gpu_mem_info_;
};

extern LegacyMemMonitor GPUMemMonitor;
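
// Illustrative usage sketch (not part of this header; the device id, byte
// count, and the meaning of Initialize's argument are assumptions):
//
//   GPUMemMonitor.Initialize(gpu_device_count);      // one MemInfo per device
//   GPUMemMonitor.Add(/*device*/ 0, /*bytes*/ 1 << 20);
//   GPUMemMonitor.Minus(/*device*/ 0, /*bytes*/ 1 << 20);
//   GPUMemMonitor.PrintMemUsage();                   // dump the recorded usage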

class LegacyAllocatorPrivate;
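
// Exposes the legacy allocation path through the Allocator interface for the
// given place.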
class LegacyAllocator : public Allocator {
 public:
  explicit LegacyAllocator(const platform::Place &p) : place_(p) {}

 protected:
  Allocation *AllocateImpl(size_t size, Allocator::Attr attr) override;
  void Free(Allocation *allocation) override;

 private:
  platform::Place place_;
};

}  // namespace allocation
}  // namespace memory
}  // namespace paddle