Unverified commit 47ebe435, authored by: D dzhwinter, committed by: GitHub

Fix/vector (#8045)

* "clean code"

* "clean code"
Parent commit 9b83462a
...@@ -34,18 +34,6 @@ namespace framework { ...@@ -34,18 +34,6 @@ namespace framework {
template <typename T> template <typename T>
class Vector : public std::vector<T> { class Vector : public std::vector<T> {
public:
/* NOTE(dzhwinter):
* Data always store and modified on Host.
* If the data is modified when use cuda_data interface,
* You need to call the CopyFromCUDA explicitly to synchronize data.
*
*/
enum class kDataPosition {
kDataOnHost = 0,
kDataOnDevice = 1,
};
public: public:
using std::vector<T>::vector; using std::vector<T>::vector;
...@@ -55,11 +43,12 @@ class Vector : public std::vector<T> { ...@@ -55,11 +43,12 @@ class Vector : public std::vector<T> {
virtual ~Vector() { virtual ~Vector() {
#ifdef PADDLE_WITH_CUDA #ifdef PADDLE_WITH_CUDA
if (cuda_ptr_ != nullptr) { if (cuda_ptr_ != nullptr) {
memory::Free<platform::CUDAPlace>(place_, static_cast<void *>(cuda_ptr_)); memory::Free<platform::CUDAPlace>(place_, cuda_ptr_);
} }
#endif #endif
} }
/* Get device vector */
T *cuda_data() { T *cuda_data() {
CopyToCUDA(); CopyToCUDA();
PADDLE_ENFORCE_NOT_NULL( PADDLE_ENFORCE_NOT_NULL(
...@@ -67,81 +56,73 @@ class Vector : public std::vector<T> { ...@@ -67,81 +56,73 @@ class Vector : public std::vector<T> {
return static_cast<T *>(cuda_ptr_); return static_cast<T *>(cuda_ptr_);
} }
/* Get host vector */
T *data() { return std::vector<T>::data(); } T *data() { return std::vector<T>::data(); }
const T *data() const { return std::vector<T>::data(); } const T *data() const { return std::vector<T>::data(); }
/* Synchronize host vector to device vector */
void CopyToCUDA(); void CopyToCUDA();
/* Synchronize device vector to host vector */
void CopyFromCUDA(); void CopyFromCUDA();
/* Switch device vector location */
void CopyToPeer(platform::Place); void CopyToPeer(platform::Place);
private: private:
void *cuda_ptr_ = nullptr; void *cuda_ptr_ = nullptr;
size_t cuda_size_ = 0; size_t cuda_size_ = 0; // device vector numel
/*The DataPosition is unused now,
if we want support random access from cpu and cuda,
we need to overload all the vector method */
kDataPosition position_ = kDataPosition::kDataOnHost;
platform::CUDAPlace place_; platform::CUDAPlace place_;
}; };
template <typename T> template <typename T>
void Vector<T>::CopyToCUDA() { void Vector<T>::CopyToCUDA() {
#ifdef PADDLE_WITH_CUDA #ifdef PADDLE_WITH_CUDA
if (cuda_ptr_ == nullptr) { if (cuda_size_ < this->size()) {
if (cuda_ptr_ != nullptr) {
memory::Free<platform::CUDAPlace>(place_, cuda_ptr_);
}
cuda_ptr_ = cuda_ptr_ =
memory::Alloc<platform::CUDAPlace>(place_, this->size() * sizeof(T)); memory::Alloc<platform::CUDAPlace>(place_, this->size() * sizeof(T));
} }
cuda_size_ = this->size();
platform::DeviceContextPool &pool = platform::DeviceContextPool::Instance(); platform::DeviceContextPool &pool = platform::DeviceContextPool::Instance();
auto *cuda_ctx = pool.GetByPlace(place_); auto *ctx = pool.GetByPlace(place_);
memory::Copy(place_, cuda_ptr_, platform::CPUPlace(),
memory::Copy(place_, static_cast<void *>(cuda_ptr_), platform::CPUPlace(),
static_cast<const void *>(this->data()), static_cast<const void *>(this->data()),
this->size() * sizeof(T), cuda_ctx->stream()); this->size() * sizeof(T), ctx->stream());
cuda_ctx->Wait(); ctx->Wait();
cuda_size_ = this->size();
#endif #endif
} }
template <typename T> template <typename T>
void Vector<T>::CopyFromCUDA() { void Vector<T>::CopyFromCUDA() {
#ifdef PADDLE_WITH_CUDA #ifdef PADDLE_WITH_CUDA
platform::DeviceContextPool &pool = platform::DeviceContextPool::Instance();
auto *cuda_ctx = pool.GetByPlace(place_);
if (cuda_ptr_ == nullptr) { if (cuda_ptr_ == nullptr) {
LOG(WARNING) << "No uncommited cuda data."; LOG(WARNING) << "No uncommitted cuda data.";
return; return;
} }
this->resize(cuda_size_); this->resize(cuda_size_);
platform::DeviceContextPool &pool = platform::DeviceContextPool::Instance();
auto *ctx = pool.GetByPlace(place_);
memory::Copy(platform::CPUPlace(), static_cast<void *>(this->data()), place_, memory::Copy(platform::CPUPlace(), static_cast<void *>(this->data()), place_,
static_cast<const void *>(cuda_ptr_), this->size() * sizeof(T), static_cast<const void *>(cuda_ptr_), this->size() * sizeof(T),
cuda_ctx->stream()); ctx->stream());
cuda_ctx->Wait(); ctx->Wait();
#endif #endif
} }
template <typename T> template <typename T>
void Vector<T>::CopyToPeer(platform::Place peer_place) { void Vector<T>::CopyToPeer(platform::Place peer_place) {
if (platform::is_cpu_place(peer_place)) {
return;
}
#ifdef PADDLE_WITH_CUDA #ifdef PADDLE_WITH_CUDA
auto *cuda_ctx = platform::DeviceContextPool::Instance().GetByPlace(place_); auto *ctx = platform::DeviceContextPool::Instance().GetByPlace(place_);
void *peer_cuda_ptr_ = memory::Alloc<platform::CUDAPlace>( void *peer_cuda_ptr = memory::Alloc<platform::CUDAPlace>(
boost::get<platform::CUDAPlace>(peer_place), this->size() * sizeof(T)); boost::get<platform::CUDAPlace>(peer_place), this->size() * sizeof(T));
memory::Copy(boost::get<platform::CUDAPlace>(peer_place), memory::Copy(boost::get<platform::CUDAPlace>(peer_place), peer_cuda_ptr,
static_cast<void *>(peer_cuda_ptr_), place_, place_, cuda_ptr_, this->size() * sizeof(T), ctx->stream());
static_cast<const void *>(cuda_ptr_), this->size() * sizeof(T), ctx->Wait();
cuda_ctx->stream());
cuda_ctx->Wait(); memory::Free<platform::CUDAPlace>(place_, cuda_ptr_);
memory::Free<platform::CUDAPlace>(place_, static_cast<void *>(cuda_ptr_));
place_ = boost::get<platform::CUDAPlace>(peer_place); place_ = boost::get<platform::CUDAPlace>(peer_place);
cuda_ptr_ = peer_cuda_ptr_; cuda_ptr_ = peer_cuda_ptr;
#endif #endif
} }
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
To comment, please register.