/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

   http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License. */

#pragma once

#include <initializer_list>
#include <vector>

#include "paddle/memory/memcpy.h"
#include "paddle/memory/memory.h"
#include "paddle/platform/device_context.h"
#include "paddle/platform/enforce.h"
#include "paddle/platform/place.h"

namespace paddle {
namespace framework {

/**
 * @brief A vector that can hold its data on both CPU and GPU.
 * The host copy shares the lifetime of the Vector itself, while the
 * device copy is lazily allocated and synchronized on demand.
 */
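// Usage sketch (assumes a CUDA build, i.e. PADDLE_WITH_CUDA is defined;
// the variable names below are illustrative only):
//
//   framework::Vector<size_t> lod{0, 2, 5};  // behaves like std::vector
//   size_t *d_lod = lod.cuda_data();  // lazily allocates and copies to GPU
//   lod.push_back(9);                 // host-side mutations ...
//   lod.CopyToCUDA();                 // ... must be synchronized explicitly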

template <typename T>
class Vector : public std::vector<T> {
 public:
  using std::vector<T>::vector;

  Vector() {}
  Vector(const std::vector<T> &v) : std::vector<T>(v) {}  // NOLINT

  virtual ~Vector() {
#ifdef PADDLE_WITH_CUDA
    if (cuda_ptr_ != nullptr) {
      memory::Free<platform::CUDAPlace>(place_, cuda_ptr_);
    }
#endif
  }

  /* Get device vector; copies host data to the device first */
  T *cuda_data() {
    CopyToCUDA();
    PADDLE_ENFORCE_NOT_NULL(
        cuda_ptr_, "No data or Insufficient CUDA memory to allocation");
    return static_cast<T *>(cuda_ptr_);
  }

  /* Get host vector */
  T *data() { return std::vector<T>::data(); }
  const T *data() const { return std::vector<T>::data(); }

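  /* Get the vector for the given place (host or device) */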
  T *data(const platform::Place &place) {
    if (platform::is_cpu_place(place)) {
      return data();
    } else {
      return cuda_data();
    }
  }

  /* Synchronize host vector to device vector */
  void CopyToCUDA();
  /* Synchronize device vector to host vector */
  void CopyFromCUDA();
  /* Switch device vector location */
  void CopyToPeer(platform::Place);

 private:
  void *cuda_ptr_ = nullptr;
  size_t cuda_size_ = 0;  // number of elements in the device buffer
  platform::CUDAPlace place_;
};

template <typename T>
void Vector<T>::CopyToCUDA() {
#ifdef PADDLE_WITH_CUDA
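  // (Re)allocate the device buffer only when the host vector has outgrown it.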
  if (cuda_size_ < this->size()) {
    if (cuda_ptr_ != nullptr) {
      memory::Free<platform::CUDAPlace>(place_, cuda_ptr_);
    }
    cuda_ptr_ =
        memory::Alloc<platform::CUDAPlace>(place_, this->size() * sizeof(T));
  }
  cuda_size_ = this->size();
  platform::DeviceContextPool &pool = platform::DeviceContextPool::Instance();
  auto *ctx = pool.GetByPlace(place_);
  memory::Copy(place_, cuda_ptr_, platform::CPUPlace(),
               static_cast<const void *>(this->data()),
               this->size() * sizeof(T), ctx->stream());
  ctx->Wait();
#endif
}

template <typename T>
void Vector<T>::CopyFromCUDA() {
#ifdef PADDLE_WITH_CUDA
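  // If nothing was ever copied to the device, there is nothing to fetch.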
  if (cuda_ptr_ == nullptr) {
    LOG(WARNING) << "No CUDA data to copy back; the device buffer was "
                    "never allocated.";
    return;
  }
  this->resize(cuda_size_);
  platform::DeviceContextPool &pool = platform::DeviceContextPool::Instance();
  auto *ctx = pool.GetByPlace(place_);
  memory::Copy(platform::CPUPlace(), static_cast<void *>(this->data()), place_,
               static_cast<const void *>(cuda_ptr_), this->size() * sizeof(T),
               ctx->stream());
  ctx->Wait();
#endif
}

template <typename T>
void Vector<T>::CopyToPeer(platform::Place peer_place) {
#ifdef PADDLE_WITH_CUDA
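  // Move the device buffer to peer_place: allocate on the peer device, copy
  // device-to-device, then release the old buffer.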
  if (cuda_ptr_ == nullptr) {
    // Nothing on the device yet; just record the new place for future copies.
    place_ = boost::get<platform::CUDAPlace>(peer_place);
    return;
  }
  auto *ctx = platform::DeviceContextPool::Instance().GetByPlace(place_);
  void *peer_cuda_ptr = memory::Alloc<platform::CUDAPlace>(
      boost::get<platform::CUDAPlace>(peer_place), cuda_size_ * sizeof(T));
  memory::Copy(boost::get<platform::CUDAPlace>(peer_place), peer_cuda_ptr,
               place_, cuda_ptr_, cuda_size_ * sizeof(T), ctx->stream());
  ctx->Wait();

  memory::Free<platform::CUDAPlace>(place_, cuda_ptr_);
  place_ = boost::get<platform::CUDAPlace>(peer_place);
  cuda_ptr_ = peer_cuda_ptr;
#endif
}

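// Explicit instantiations for the integral element types used elsewhere in
// the framework.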
template class Vector<int>;
template class Vector<unsigned>;
template class Vector<size_t>;
template class Vector<int64_t>;

}  // namespace framework
}  // namespace paddle