Commit 02d494c3 authored by Yu Yang

Polish code and add comments

Parent 04f62508
@@ -25,13 +25,17 @@
 namespace paddle {
 namespace framework {

+// Vector<T> implements the std::vector interface, and can get Data or
+// MutableData from any place. The data will be synced implicitly inside.
 template <typename T>
 class Vector {
  public:
   using value_type = T;

+  // Default ctor. Create empty Vector
   Vector() { InitEmpty(); }

+  // Fill vector with value. The vector size is `count`.
   explicit Vector(size_t count, const T& value = T()) {
     if (count == 0) {
       InitEmpty();
@@ -44,6 +48,7 @@ class Vector {
     }
   }

+  // Ctor with init_list
   Vector(std::initializer_list<T> init) {
     if (init.size() == 0) {
       InitEmpty();
@@ -52,6 +57,7 @@ class Vector {
     }
   }

+  // implicit cast from std::vector.
   template <typename U>
   Vector(const std::vector<U>& dat) {  // NOLINT
     if (dat.size() == 0) {
@@ -61,8 +67,10 @@ class Vector {
     }
   }

+  // Copy ctor
   Vector(const Vector<T>& other) { this->operator=(other); }

+  // Copy operator
   Vector<T>& operator=(const Vector<T>& other) {
     if (other.size() != 0) {
       this->InitByIter(other.size(), other.begin(), other.end());
@@ -72,27 +80,31 @@ class Vector {
     return *this;
   }

+  // Move ctor
   Vector(Vector<T>&& other) {
     this->size_ = other.size_;
     this->flag_ = other.flag_;
-    if (other.cuda_vec_.capacity()) {
+    if (other.cuda_vec_.memory_size()) {
       this->cuda_vec_.ShareDataWith(other.cuda_vec_);
     }
-    if (other.cpu_vec_.capacity()) {
+    if (other.cpu_vec_.memory_size()) {
       this->cpu_vec_.ShareDataWith(other.cpu_vec_);
     }
   }

+  // CPU data access method. Mutable.
   T& operator[](size_t i) {
     MutableCPU();
     return const_cast<T*>(cpu_vec_.data<T>())[i];
   }

+  // CPU data access method. Immutable.
   const T& operator[](size_t i) const {
     ImmutableCPU();
     return cpu_vec_.data<T>()[i];
   }

+  // std::vector iterator methods. Based on CPU data access method
   size_t size() const { return size_; }

   T* begin() { return &this->operator[](0); }
@@ -116,17 +128,22 @@ class Vector {
     return *it;
   }

+  T* data() { return begin(); }
+
+  const T* data() const { return begin(); }
+
   const T& front() const { return *begin(); }
+  // end of std::vector iterator methods

+  // assign this from iterator.
+  // NOTE: the iterator must support `end-begin`
   template <typename Iter>
   void assign(Iter begin, Iter end) {
     InitByIter(end - begin, begin, end);
   }

-  T* data() { return begin(); }
-
-  const T* data() const { return begin(); }
-
+  // push_back. If the previous capacity is not enough, the memory will
+  // double.
   void push_back(T elem) {
     if (size_ + 1 > capacity()) {
       reserve((size_ + 1) << 1);
@@ -135,6 +152,19 @@ class Vector {
     ++size_;
   }

+  // extend a vector by iterator.
+  // NOTE: the iterator must support end-begin
+  template <typename It>
+  void Extend(It begin, It end) {
+    size_t pre_size = size_;
+    resize(pre_size + (end - begin));
+    T* ptr = this->begin() + pre_size;
+    for (; begin < end; ++begin, ++ptr) {
+      *ptr = *begin;
+    }
+  }
+
+  // resize the vector
   void resize(size_t size) {
     if (size + 1 < capacity()) {
       size_ = size;
@@ -145,7 +175,7 @@ class Vector {
       T* ptr = cpu_tensor.mutable_data<T>(
           framework::make_ddim({static_cast<int64_t>(size)}), cpu);
       const T* old_ptr =
-          cpu_vec_.capacity() == 0 ? nullptr : cpu_vec_.data<T>();
+          cpu_vec_.memory_size() == 0 ? nullptr : cpu_vec_.data<T>();
       if (old_ptr != nullptr) {
         std::copy(old_ptr, old_ptr + size_, ptr);
       }
@@ -154,6 +184,7 @@ class Vector {
     }
   }

+  // get cuda ptr. immutable
   const T* CUDAData(platform::Place place) const {
     PADDLE_ENFORCE(platform::is_gpu_place(place),
                    "CUDA Data must on CUDA place");
@@ -161,37 +192,31 @@ class Vector {
     return cuda_vec_.data<T>();
   }

+  // get cuda ptr. mutable
   T* CUDAMutableData(platform::Place place) {
     const T* ptr = CUDAData(place);
     flag_ = kDirty | kDataInCUDA;
     return const_cast<T*>(ptr);
   }

-  template <typename It>
-  void Extend(It begin, It end) {
-    size_t pre_size = size_;
-    resize(pre_size + (end - begin));
-    T* ptr = this->begin() + pre_size;
-    for (; begin < end; ++begin, ++ptr) {
-      *ptr = *begin;
-    }
-  }
-
+  // clear
   void clear() {
     size_ = 0;
     flag_ = kDirty | kDataInCPU;
   }

   size_t capacity() const {
-    return cpu_vec_.capacity() / SizeOfType(typeid(T));
+    return cpu_vec_.memory_size() / SizeOfType(typeid(T));
   }

+  // reserve data
   void reserve(size_t size) {
     size_t pre_size = size_;
     resize(size);
     resize(pre_size);
   }

+  // the unify method to access CPU or CUDA data. immutable.
   const T* Data(platform::Place place) const {
     if (platform::is_gpu_place(place)) {
       return CUDAData(place);
@@ -200,6 +225,7 @@ class Vector {
     }
   }

+  // the unify method to access CPU or CUDA data. mutable.
   T* MutableData(platform::Place place) {
     if (platform::is_gpu_place(place)) {
       return CUDAMutableData(place);
@@ -208,6 +234,7 @@ class Vector {
     }
   }

+  // implicit cast operator. Vector can be cast to std::vector implicitly.
   operator std::vector<T>() const {
     std::vector<T> result;
     result.resize(size());
@@ -243,7 +270,12 @@ class Vector {
     size_ = size;
   }

-  enum DataFlag { kDataInCPU = 0x01, kDataInCUDA = 0x02, kDirty = 0x10 };
+  enum DataFlag {
+    kDataInCPU = 0x01,
+    kDataInCUDA = 0x02,
+    // kDirty means the data has been changed in one device.
+    kDirty = 0x10
+  };

   void MutableCPU() {
     if (IsInCUDA() && IsDirty()) {
......
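For readers skimming the new comments, a minimal usage sketch of the synchronization behaviour they describe follows. It is not part of this commit; the header paths and the use of platform::CUDAPlace as the GPU place type are assumptions.

// Hedged usage sketch of Vector<T>, not part of this commit.
// Header paths are assumptions; adjust to their actual locations in the tree.
#include <cstdint>
#include <vector>
#include "paddle/fluid/framework/mixed_vector.h"
#include "paddle/fluid/platform/place.h"

void MixedVectorSketch() {
  paddle::framework::Vector<int64_t> ids;      // empty, data starts on the CPU
  for (int64_t i = 0; i < 8; ++i) {
    ids.push_back(i);                          // capacity doubles when exhausted
  }
  ids[0] = 42;                                 // mutable CPU access marks the CPU copy dirty

  paddle::platform::CUDAPlace gpu(0);          // assumed GPU place type
  const int64_t* d_ids = ids.CUDAData(gpu);    // first GPU read syncs CPU -> GPU
  // ... launch a kernel that reads d_ids ...
  (void)d_ids;

  std::vector<int64_t> host = ids;             // implicit cast back to std::vector
  (void)host;
}

Calling CUDAMutableData(gpu) instead would set kDirty | kDataInCUDA, so the next CPU-side access (operator[], Data(cpu_place), etc.) would copy the data back from the device first.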
@@ -120,6 +120,7 @@ class Tensor {
     return holder_->type();
   }

+  // memory size returns the holding memory size in byte.
   size_t memory_size() const;

   inline void check_memory_size() const;
@@ -128,10 +129,6 @@ class Tensor {

   inline void set_layout(const DataLayout layout) { layout_ = layout; }

-  size_t capacity() const {
-    return holder_ == nullptr ? 0UL : holder_->size() - offset_;
-  }
-
  private:
   friend class LoDTensor;
......
@@ -62,14 +62,14 @@ inline void Tensor::check_memory_size() const {
   PADDLE_ENFORCE_NOT_NULL(
       holder_, "Tensor holds no memory. Call Tensor::mutable_data first.");
   PADDLE_ENFORCE_GE(
-      holder_->size(), memory_size() + offset_,
+      numel() * SizeOfType(type()), memory_size(),
       "Tensor's dims_ is out of bound. Call Tensor::mutable_data "
       "first to re-allocate memory.\n"
       "or maybe the required data-type mismatches the data already stored.");
 }

 inline size_t Tensor::memory_size() const {
-  return holder_ == nullptr ? 0UL : numel() * SizeOfType(type());
+  return holder_ == nullptr ? 0UL : holder_->size() - offset_;
 }

 template <typename T>
......
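A hedged worked example of the swapped check (the numbers are made up, not from the commit): memory_size() now reports the bytes actually held behind this tensor, holder_->size() - offset_, and check_memory_size() requires that numel() * SizeOfType(type()) fits into that.

// Self-contained mirror of the new invariant using plain variables; the
// names and numbers are illustrative, not the real Tensor API.
#include <cassert>
#include <cstddef>

int main() {
  std::size_t holder_size = 256;  // bytes owned by the (possibly shared) allocation
  std::size_t offset      = 64;   // this tensor's byte offset into the allocation
  std::size_t numel       = 12;   // e.g. dims {3, 4}
  std::size_t elem_size   = 4;    // sizeof(float)

  std::size_t memory_size = holder_size - offset;  // new memory_size(): 192 bytes
  std::size_t required    = numel * elem_size;     // numel() * SizeOfType(type()): 48 bytes

  // check_memory_size() now enforces required <= memory_size(), instead of
  // comparing holder_->size() against memory_size() + offset_.
  assert(required <= memory_size);
  return 0;
}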