Commit c48fc4d8 authored by fengjiayi, committed by GitHub

Merge pull request #2825 from Canpio/dev_add_tensor_copy

Add Tensor::CopyFrom and Tensor::mutable_data(Place place)
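For orientation, here is a minimal caller-side sketch of the two additions. This is hypothetical usage code, not part of the commit; it assumes the Tensor API exactly as it appears in the diff below (set_dims, make_ddim, and CPUPlace come from paddle::framework and paddle::platform).

    using namespace paddle::framework;
    using namespace paddle::platform;

    Tensor t;
    t.set_dims(make_ddim({2, 3}));
    // New overload: allocate for the already-set dims, given only a place.
    float* p = t.mutable_data<float>(CPUPlace());
    p[0] = 1.0f;

    // New deep copy; the enforce inside CopyFrom restricts it to CPU for now.
    Tensor copy;
    copy.CopyFrom<float>(t, CPUPlace());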
@@ -15,8 +15,8 @@ limitations under the License. */
 #pragma once
 #include <cstdint>
+#include <cstring>
 #include <memory>
-#include <type_traits>
 #include "paddle/framework/ddim.h"
 #include "paddle/framework/enforce.h"
 #include "paddle/memory/memory.h"
@@ -27,45 +27,63 @@ namespace framework {
 class Tensor {
  public:
-  Tensor() : offset_(0) {}
+  Tensor() : numel_(0), offset_(0) {}

-  explicit Tensor(const DDim& dims) : dims_(dims), offset_(0) {}
+  Tensor& operator=(const Tensor& src) = delete;

   template <typename T>
   const T* data() const {
-    PADDLE_ENFORCE(
-        holder_ != nullptr,
-        "Tenosr has not been initialized. Call Tensor::mutable_data first.");
+    CheckDims<T>();
     return reinterpret_cast<const T*>(
-        reinterpret_cast<uintptr_t>(holder_->Ptr()) + offset_);
+        reinterpret_cast<uintptr_t>(holder_->ptr()) + offset_);
   }

-  template <typename T,  // must be POD types
-            typename std::enable_if<std::is_pod<T>::value>::type* = nullptr>
+  template <typename T>
   T* mutable_data(DDim dims, paddle::platform::Place place) {
-    dims_ = dims;
+    set_dims(dims);
+    return mutable_data<T>(place);
+  }
+
+  template <typename T>
+  T* mutable_data(paddle::platform::Place place) {
+    PADDLE_ENFORCE(numel_ > 0,
+                   "Tensor::numel_ must be larger than zero to call "
+                   "Tensor::mutable_data. Call Tensor::set_dim first.");
     if (holder_ == nullptr ||
-        !(holder_->Place() ==
+        !(holder_->place() ==
           place) /* some versions of boost::variant don't have operator!= */
-        || holder_->Size() < product(dims) * sizeof(T) + offset_) {
-      holder_.reset(new PlaceholderImpl<T>(place, product(dims) * sizeof(T)));
+        || holder_->size() < numel_ * sizeof(T) + offset_) {
+      holder_.reset(new PlaceholderImpl<T>(place, numel_ * sizeof(T)));
       offset_ = 0;
     }
-    return reinterpret_cast<T*>(reinterpret_cast<uintptr_t>(holder_->Ptr()) +
+    return reinterpret_cast<T*>(reinterpret_cast<uintptr_t>(holder_->ptr()) +
                                 offset_);
   }

+  template <typename T>
   void ShareDataFrom(const Tensor& src) {
-    PADDLE_ENFORCE(src.holder_ != nullptr,
-                   "Can not share data from an uninitialized tensor.");
+    src.CheckDims<T>();
     holder_ = src.holder_;
-    dims_ = src.dims_;
+    set_dims(src.dims());
     offset_ = src.offset_;
   }

+  template <typename T>
+  void CopyFrom(const Tensor& src, paddle::platform::Place dst_place) {
+    PADDLE_ENFORCE(platform::is_cpu_place(src.holder_->place()) &&
+                       platform::is_cpu_place(dst_place),
+                   "Tensor::CopyFrom only support CPU now.");
+    src.CheckDims<T>();
+    size_t size = src.numel_ * sizeof(T);
+    set_dims(src.dims());
+    const void* src_ptr = static_cast<const void*>(src.data<T>());
+    void* dst_ptr = static_cast<void*>(mutable_data<T>(dst_place));
+    memcpy(dst_ptr, src_ptr, size);
+  }
+
+  template <typename T>
   Tensor Slice(const int& begin_idx, const int& end_idx) const {
-    PADDLE_ENFORCE(holder_ != nullptr,
-                   "The sliced tenosr has not been initialized.");
+    CheckDims<T>();
     PADDLE_ENFORCE(begin_idx >= 0 && end_idx <= dims_[0],
                    "Slice index is less than zero or out of bound.");
     PADDLE_ENFORCE(begin_idx < end_idx,
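The rewritten mutable_data<T>(place) allocates lazily: a new PlaceholderImpl is created only when there is no holder_ yet, when the requested place differs from the current one, or when the existing block is smaller than numel_ * sizeof(T) + offset_; otherwise the old allocation (and its contents) is reused.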
@@ -78,12 +96,21 @@ class Tensor {
     }
     Tensor dst;
     dst.holder_ = holder_;
-    dst.dims_ = dims_;
-    dst.dims_[0] = end_idx - begin_idx;
-    dst.offset_ = offset_ + begin_idx * base * holder_->TypeSize();
+    DDim dst_dims = dims_;
+    dst_dims[0] = end_idx - begin_idx;
+    dst.set_dims(dst_dims);
+    dst.offset_ = offset_ + begin_idx * base * sizeof(T);
     return dst;
   }

+  void set_dims(const DDim& dims) {
+    if (dims == dims_) {
+      return;
+    }
+    dims_ = dims;
+    numel_ = product(dims_);
+  }
+
   DDim dims() const { return dims_; }

  private:
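A worked example of the new Slice bookkeeping: for an int tensor with dims {5, 3, 4}, base (the element count per index along dimension 0, computed in the unchanged lines between the two hunks) is 3 * 4 = 12, so Slice<int>(1, 3) shares holder_, sets dims to {2, 3, 4} via set_dims, and sets offset_ = 0 + 1 * 12 * sizeof(int) = 48 bytes. Replacing holder_->TypeSize() with sizeof(T) is what forces Slice to become a template.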
@@ -91,10 +118,9 @@ class Tensor {
   // parameter of Variable.
   struct Placeholder {
     virtual ~Placeholder() {}
-    virtual void* Ptr() const = 0;
-    virtual paddle::platform::Place Place() const = 0;
-    virtual size_t Size() const = 0;
-    virtual size_t TypeSize() const = 0;
+    virtual void* ptr() const = 0;
+    virtual paddle::platform::Place place() const = 0;
+    virtual size_t size() const = 0;
   };

   template <typename T>
@@ -118,18 +144,27 @@ class Tensor {
         place_(place),
         size_(size) {}

-    virtual void* Ptr() const { return static_cast<void*>(ptr_.get()); }
-    virtual size_t Size() const { return size_; }
-    virtual paddle::platform::Place Place() const { return place_; }
-    virtual size_t TypeSize() const { return sizeof(T); }
+    virtual void* ptr() const { return static_cast<void*>(ptr_.get()); }
+    virtual size_t size() const { return size_; }
+    virtual paddle::platform::Place place() const { return place_; }

     std::unique_ptr<T, Deleter> ptr_;
     paddle::platform::Place place_;  // record the place of ptr_.
     size_t size_;                    // size of the memory block.
   };

+  template <typename T>
+  inline void CheckDims() const {
+    PADDLE_ENFORCE(holder_ != nullptr,
+                   "Tenosr holds no memory. Call Tensor::mutable_data first.");
+    PADDLE_ENFORCE(holder_->size() >= numel_ * sizeof(T) + offset_,
+                   "Tensor's dims_ is out of bound. Call Tensor::mutable_data "
+                   "first to re-allocate memory.");
+  }
+
   std::shared_ptr<Placeholder> holder_;  // holds the memory block if allocated.
   DDim dims_;
+  size_t numel_;  // cache of `product(dims_)`
   size_t offset_;  // marks the begin of tensor data area.
 };
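The new CheckDims<T>() centralizes the invariant that numel_ * sizeof(T) + offset_ bytes fit inside the shared block, which also covers tensors produced by Slice. To make the two data-transfer paths concrete, another hypothetical caller-side sketch under the same assumptions as the sketch above: ShareDataFrom aliases the allocation, CopyFrom duplicates it.

    Tensor a;
    a.set_dims(make_ddim({4}));
    int* ap = a.mutable_data<int>(CPUPlace());
    ap[0] = 7;

    Tensor alias;
    alias.ShareDataFrom<int>(a);         // shares holder_; no bytes copied
    // alias.data<int>() == a.data<int>()

    Tensor clone;
    clone.CopyFrom<int>(a, CPUPlace());  // fresh allocation + memcpy
    // clone.data<int>() != a.data<int>(), yet clone.data<int>()[0] == 7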
@@ -18,7 +18,8 @@
 TEST(Tensor, Dims) {
   using namespace paddle::framework;
   using namespace paddle::platform;
-  Tensor tt(make_ddim({2, 3, 4}));
+  Tensor tt;
+  tt.set_dims(make_ddim({2, 3, 4}));
   DDim dims = tt.dims();
   ASSERT_EQ(arity(dims), 3);
   for (int i = 0; i < 3; ++i) {
@@ -35,7 +36,7 @@ TEST(Tensor, DataAssert) {
   } catch (paddle::framework::EnforceNotMet err) {
     caught = true;
     std::string msg =
-        "Tenosr has not been initialized. Call Tensor::mutable_data first.";
+        "Tenosr holds no memory. Call Tensor::mutable_data first.";
     const char* what = err.what();
     for (size_t i = 0; i < msg.length(); ++i) {
       ASSERT_EQ(what[i], msg[i]);
@@ -104,19 +105,18 @@ TEST(Tensor, ShareDataFrom) {
     // Try to share data form uninitialized tensor
     bool caught = false;
     try {
-      dst_tensor.ShareDataFrom(src_tensor);
+      dst_tensor.ShareDataFrom<float>(src_tensor);
     } catch (EnforceNotMet err) {
       caught = true;
-      std::string msg = "Can not share data from an uninitialized tensor.";
+      std::string msg =
+          "Tenosr holds no memory. Call Tensor::mutable_data first.";
       const char* what = err.what();
       for (size_t i = 0; i < msg.length(); ++i) {
         ASSERT_EQ(what[i], msg[i]);
       }
     }
     ASSERT_TRUE(caught);

     src_tensor.mutable_data<int>(make_ddim({2, 3, 4}), CPUPlace());
-    dst_tensor.ShareDataFrom(src_tensor);
+    dst_tensor.ShareDataFrom<int>(src_tensor);
     ASSERT_EQ(src_tensor.data<int>(), dst_tensor.data<int>());
   }
@@ -124,7 +124,7 @@ TEST(Tensor, ShareDataFrom) {
     Tensor src_tensor;
     Tensor dst_tensor;
     src_tensor.mutable_data<int>(make_ddim({2, 3, 4}), GPUPlace());
-    dst_tensor.ShareDataFrom(src_tensor);
+    dst_tensor.ShareDataFrom<int>(src_tensor);
     ASSERT_EQ(src_tensor.data<int>(), dst_tensor.data<int>());
   }
 }
@@ -135,7 +135,7 @@ TEST(Tensor, Slice) {
   {
     Tensor src_tensor;
     src_tensor.mutable_data<int>(make_ddim({5, 3, 4}), CPUPlace());
-    Tensor slice_tensor = src_tensor.Slice(1, 3);
+    Tensor slice_tensor = src_tensor.Slice<int>(1, 3);
     DDim slice_dims = slice_tensor.dims();
     ASSERT_EQ(arity(slice_dims), 3);
     EXPECT_EQ(slice_dims[0], 2);
@@ -158,7 +158,7 @@ TEST(Tensor, Slice) {
   {
     Tensor src_tensor;
     src_tensor.mutable_data<double>(make_ddim({6, 9}), GPUPlace());
-    Tensor slice_tensor = src_tensor.Slice(2, 6);
+    Tensor slice_tensor = src_tensor.Slice<double>(2, 6);
     DDim slice_dims = slice_tensor.dims();
     ASSERT_EQ(arity(slice_dims), 2);
     EXPECT_EQ(slice_dims[0], 4);
@@ -178,4 +178,29 @@ TEST(Tensor, Slice) {
   }
 }

+TEST(Tensor, CopyFrom) {
+  using namespace paddle::framework;
+  using namespace paddle::platform;
+
+  Tensor src_tensor;
+  int* src_ptr = src_tensor.mutable_data<int>(make_ddim({3, 3}), CPUPlace());
+  int arr[9] = {1, 2, 3, 4, 5, 6, 7, 8, 9};
+  memcpy(src_ptr, arr, 9 * sizeof(int));
+  Tensor dst_tensor;
+  dst_tensor.CopyFrom<int>(src_tensor, CPUPlace());
+
+  const int* dst_ptr = dst_tensor.data<int>();
+  ASSERT_NE(src_ptr, dst_ptr);
+  for (size_t i = 0; i < 9; ++i) {
+    EXPECT_EQ(src_ptr[i], dst_ptr[i]);
+  }
+
+  Tensor slice_tensor = src_tensor.Slice<int>(1, 2);
+  dst_tensor.CopyFrom<int>(slice_tensor, CPUPlace());
+  const int* slice_ptr = slice_tensor.data<int>();
+  dst_ptr = dst_tensor.data<int>();
+  ASSERT_NE(dst_ptr, slice_ptr);
+  for (size_t i = 0; i < 3; ++i) {
+    EXPECT_EQ(dst_ptr[i], slice_ptr[i]);
+  }
+}
 */
\ No newline at end of file
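The slice case in TEST(Tensor, CopyFrom) passes because a slice along dimension 0 stays contiguous: slice_tensor reuses src_tensor's holder_ and only adjusts dims_ and offset_ (here 3 ints starting 12 bytes in), so CopyFrom can move them with a single memcpy.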