From 68adb9541d339ffd0df43a7a45a5a4adf16f2067 Mon Sep 17 00:00:00 2001
From: fengjiayi
Date: Sat, 15 Jul 2017 15:00:18 +0800
Subject: [PATCH] enbale tensor memory test

---
 paddle/framework/CMakeLists.txt |   2 +-
 paddle/framework/tensor.h       |  50 ++++++++------
 paddle/framework/tensor_test.cc | 118 +++++++++++++++++---------------
 3 files changed, 92 insertions(+), 78 deletions(-)

diff --git a/paddle/framework/CMakeLists.txt b/paddle/framework/CMakeLists.txt
index 8415ce67e90..f7f606e4b8c 100644
--- a/paddle/framework/CMakeLists.txt
+++ b/paddle/framework/CMakeLists.txt
@@ -2,7 +2,7 @@
 cc_library(ddim SRCS ddim.cc)
 cc_test(ddim_test SRCS ddim_test.cc DEPS ddim)
 nv_test(dim_test SRCS dim_test.cu DEPS ddim)
-cc_test(tensor_test SRCS tensor_test.cc DEPS ddim)
+cc_test(tensor_test SRCS tensor_test.cc DEPS ddim paddle_memory)
 cc_test(variable_test SRCS variable_test.cc)
 cc_test(scope_test SRCS scope_test.cc)
 cc_test(enforce_test SRCS enforce_test.cc)
diff --git a/paddle/framework/tensor.h b/paddle/framework/tensor.h
index 62e0710a824..81db722c99f 100644
--- a/paddle/framework/tensor.h
+++ b/paddle/framework/tensor.h
@@ -29,8 +29,6 @@ class Tensor {
  public:
   Tensor() : numel_(0), offset_(0) {}
 
-  Tensor& operator=(const Tensor& src) = delete;
-
   template <typename T>
   const T* data() const {
     CheckDims<T>();
@@ -39,13 +37,13 @@ class Tensor {
   }
 
   template <typename T>
-  T* mutable_data(DDim dims, paddle::platform::Place place) {
+  T* mutable_data(DDim dims, platform::Place place) {
     set_dims(dims);
     return mutable_data<T>(place);
   }
 
   template <typename T>
-  T* mutable_data(paddle::platform::Place place) {
+  T* mutable_data(platform::Place place) {
     PADDLE_ENFORCE(numel_ > 0,
                    "Tensor::numel_ must be larger than zero to call "
                    "Tensor::mutable_data. Call Tensor::set_dim first.");
@@ -53,7 +51,18 @@ class Tensor {
         !(holder_->place() ==
           place) /* some versions of boost::variant don't have operator!= */
         || holder_->size() < numel_ * sizeof(T) + offset_) {
-      holder_.reset(new PlaceholderImpl<T>(place, numel_ * sizeof(T)));
+      switch (place.which()) {
+        case 0:
+          holder_.reset(new PlaceholderImpl<T, platform::GPUPlace>(
+              boost::get<platform::GPUPlace>(place), numel_ * sizeof(T)));
+          break;
+
+        case 1:
+          holder_.reset(new PlaceholderImpl<T, platform::CPUPlace>(
+              boost::get<platform::CPUPlace>(place), numel_ * sizeof(T)));
+          break;
+      }
+
       offset_ = 0;
     }
     return reinterpret_cast<T*>(reinterpret_cast<uintptr_t>(holder_->ptr()) +
@@ -69,7 +78,7 @@ class Tensor {
   }
 
   template <typename T>
-  void CopyFrom(const Tensor& src, paddle::platform::Place dst_place) {
+  void CopyFrom(const Tensor& src, platform::Place dst_place) {
     PADDLE_ENFORCE(platform::is_cpu_place(src.holder_->place()) &&
                        platform::is_cpu_place(dst_place),
                    "Tensor::CopyFrom only support CPU now.");
@@ -119,38 +128,37 @@ class Tensor {
   struct Placeholder {
     virtual ~Placeholder() {}
     virtual void* ptr() const = 0;
-    virtual paddle::platform::Place place() const = 0;
+    virtual platform::Place place() const = 0;
     virtual size_t size() const = 0;
   };
 
-  template <typename T>
+  template <typename T, typename PlaceType>
   struct PlaceholderImpl : public Placeholder {
    private:
+    template <typename PType>
     class Deleter {
      public:
-      Deleter(platform::Place place) : place_(place) {}
-      void operator()(T* ptr) {
-        paddle::memory::Free(place_, static_cast<void*>(ptr));
-      }
+      Deleter(PType place) : place_(place) {}
+      void operator()(T* ptr) { memory::Free(place_, static_cast<void*>(ptr)); }
 
      private:
-      paddle::platform::Place place_;
+      PType place_;
     };
 
    public:
-    PlaceholderImpl(paddle::platform::Place place, size_t size)
-        : ptr_(static_cast<T*>(paddle::memory::Alloc(place, size)),
-               Deleter(place)),
+    PlaceholderImpl(PlaceType place, size_t size)
+        : ptr_(static_cast<T*>(memory::Alloc(place, size)),
+               Deleter<PlaceType>(place)),
          place_(place),
          size_(size) {}
 
     virtual void* ptr() const { return static_cast<void*>(ptr_.get()); }
     virtual size_t size() const { return size_; }
-    virtual paddle::platform::Place place() const { return place_; }
+    virtual platform::Place place() const { return place_; }
 
-    std::unique_ptr<T, Deleter> ptr_;
-    paddle::platform::Place place_;  // record the place of ptr_.
-    size_t size_;                    // size of the memory block.
+    std::unique_ptr<T, Deleter<PlaceType>> ptr_;
+    platform::Place place_;  // record the place of ptr_.
+    size_t size_;            // size of the memory block.
   };
 
   template <typename T>
@@ -166,7 +174,7 @@ class Tensor {
   DDim dims_;
   size_t numel_;   // cache of `product(dims_)`
   size_t offset_;  // marks the begin of tensor data area.
-};
+};  // namespace framework
 
 }  // namespace framework
 }  // namespace paddle
diff --git a/paddle/framework/tensor_test.cc b/paddle/framework/tensor_test.cc
index 255f69372f4..79bd0cc607b 100644
--- a/paddle/framework/tensor_test.cc
+++ b/paddle/framework/tensor_test.cc
@@ -47,7 +47,7 @@ TEST(Tensor, DataAssert) {
 
 /* following tests are not available at present
    because Memory::Alloc() and Memory::Free() have not been ready.
-
+*/
 TEST(Tensor, MutableData) {
   using namespace paddle::framework;
   using namespace paddle::platform;
@@ -72,28 +72,29 @@ TEST(Tensor, MutableData) {
     p2 = src_tensor.mutable_data<float>(make_ddim({2, 2}), CPUPlace());
     EXPECT_EQ(p1, p2);
   }
-
-  {
-    Tensor src_tensor;
-    float* p1 = nullptr;
-    float* p2 = nullptr;
-    // initialization
-    p1 = src_tensor.mutable_data<float>(make_ddim({1, 2, 3}), GPUPlace());
-    EXPECT_NE(p1, nullptr);
-    // set src_tensor a new dim with large size
-    // momery is supposed to be re-allocated
-    p2 = src_tensor.mutable_data<float>(make_ddim({3, 4}), GPUPlace());
-    EXPECT_NE(p2, nullptr);
-    EXPECT_NE(p1, p2);
-    // set src_tensor a new dim with same size
-    // momery block is supposed to be unchanged
-    p1 = src_tensor.mutable_data<float>(make_ddim({2, 2, 3}), GPUPlace());
-    EXPECT_EQ(p1, p2);
-    // set src_tensor a new dim with smaller size
-    // momery block is supposed to be unchanged
-    p2 = src_tensor.mutable_data<float>(make_ddim({2, 2}), GPUPlace());
-    EXPECT_EQ(p1, p2);
-  }
+  /*
+  {
+    Tensor src_tensor;
+    float* p1 = nullptr;
+    float* p2 = nullptr;
+    // initialization
+    p1 = src_tensor.mutable_data<float>(make_ddim({1, 2, 3}), GPUPlace());
+    EXPECT_NE(p1, nullptr);
+    // set src_tensor a new dim with large size
+    // momery is supposed to be re-allocated
+    p2 = src_tensor.mutable_data<float>(make_ddim({3, 4}), GPUPlace());
+    EXPECT_NE(p2, nullptr);
+    EXPECT_NE(p1, p2);
+    // set src_tensor a new dim with same size
+    // momery block is supposed to be unchanged
+    p1 = src_tensor.mutable_data<float>(make_ddim({2, 2, 3}), GPUPlace());
+    EXPECT_EQ(p1, p2);
+    // set src_tensor a new dim with smaller size
+    // momery block is supposed to be unchanged
+    p2 = src_tensor.mutable_data<float>(make_ddim({2, 2}), GPUPlace());
+    EXPECT_EQ(p1, p2);
+  }
+  */
 }
 
 TEST(Tensor, ShareDataFrom) {
@@ -108,9 +109,11 @@ TEST(Tensor, ShareDataFrom) {
       dst_tensor.ShareDataFrom<float>(src_tensor);
     } catch (EnforceNotMet err) {
       caught = true;
-      std::string msg = "Tenosr holds no memory. Call Tensor::mutable_data
-first."; const char* what = err.what(); for (size_t i = 0; i < msg.length();
-++i) { ASSERT_EQ(what[i], msg[i]);
+      std::string msg =
+          "Tenosr holds no memory. Call Tensor::mutable_data first.";
+      const char* what = err.what();
+      for (size_t i = 0; i < msg.length(); ++i) {
+        ASSERT_EQ(what[i], msg[i]);
       }
     }
     ASSERT_TRUE(caught);
@@ -120,13 +123,15 @@ first."; const char* what = err.what(); for (size_t i = 0; i < msg.length();
     ASSERT_EQ(src_tensor.data<int>(), dst_tensor.data<int>());
   }
 
-  {
-    Tensor src_tensor;
-    Tensor dst_tensor;
-    src_tensor.mutable_data<int>(make_ddim({2, 3, 4}), GPUPlace());
-    dst_tensor.ShareDataFrom<int>(src_tensor);
-    ASSERT_EQ(src_tensor.data<int>(), dst_tensor.data<int>());
-  }
+  /*
+  {
+    Tensor src_tensor;
+    Tensor dst_tensor;
+    src_tensor.mutable_data<int>(make_ddim({2, 3, 4}), GPUPlace());
+    dst_tensor.ShareDataFrom<int>(src_tensor);
+    ASSERT_EQ(src_tensor.data<int>(), dst_tensor.data<int>());
+  }
+  */
 }
 
 TEST(Tensor, Slice) {
@@ -155,27 +160,29 @@ TEST(Tensor, Slice) {
     EXPECT_EQ(src_data_address + 3 * 4 * 1 * sizeof(int), slice_data_address);
   }
 
-  {
-    Tensor src_tensor;
-    src_tensor.mutable_data<double>(make_ddim({6, 9}), GPUPlace());
-    Tensor slice_tensor = src_tensor.Slice<double>(2, 6);
-    DDim slice_dims = slice_tensor.dims();
-    ASSERT_EQ(arity(slice_dims), 2);
-    EXPECT_EQ(slice_dims[0], 4);
-    EXPECT_EQ(slice_dims[1], 9);
-
-    uintptr_t src_data_address =
-        reinterpret_cast<uintptr_t>(src_tensor.data<double>());
-    uintptr_t src_mutable_data_address = reinterpret_cast<uintptr_t>(
-        src_tensor.mutable_data<double>(src_tensor.dims(), GPUPlace()));
-    uintptr_t slice_data_address =
-        reinterpret_cast<uintptr_t>(slice_tensor.data<double>());
-    uintptr_t slice_mutable_data_address = reinterpret_cast<uintptr_t>(
-        slice_tensor.mutable_data<double>(slice_tensor.dims(), GPUPlace()));
-    EXPECT_EQ(src_data_address, src_mutable_data_address);
-    EXPECT_EQ(slice_data_address, slice_mutable_data_address);
-    EXPECT_EQ(src_data_address + 9 * 2 * sizeof(double), slice_data_address);
-  }
+  /*
+  {
+    Tensor src_tensor;
+    src_tensor.mutable_data<double>(make_ddim({6, 9}), GPUPlace());
+    Tensor slice_tensor = src_tensor.Slice<double>(2, 6);
+    DDim slice_dims = slice_tensor.dims();
+    ASSERT_EQ(arity(slice_dims), 2);
+    EXPECT_EQ(slice_dims[0], 4);
+    EXPECT_EQ(slice_dims[1], 9);
+
+    uintptr_t src_data_address =
+        reinterpret_cast<uintptr_t>(src_tensor.data<double>());
+    uintptr_t src_mutable_data_address = reinterpret_cast<uintptr_t>(
+        src_tensor.mutable_data<double>(src_tensor.dims(), GPUPlace()));
+    uintptr_t slice_data_address =
+        reinterpret_cast<uintptr_t>(slice_tensor.data<double>());
+    uintptr_t slice_mutable_data_address = reinterpret_cast<uintptr_t>(
+        slice_tensor.mutable_data<double>(slice_tensor.dims(), GPUPlace()));
+    EXPECT_EQ(src_data_address, src_mutable_data_address);
+    EXPECT_EQ(slice_data_address, slice_mutable_data_address);
+    EXPECT_EQ(src_data_address + 9 * 2 * sizeof(double), slice_data_address);
+  }
+  */
 }
 
 TEST(Tensor, CopyFrom) {
@@ -202,5 +209,4 @@ TEST(Tensor, CopyFrom) {
   for (size_t i = 0; i < 3; ++i) {
     EXPECT_EQ(dst_ptr[i], slice_ptr[i]);
   }
-}
-*/
\ No newline at end of file
+}
\ No newline at end of file
-- 
GitLab
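
Note on the allocation pattern introduced in tensor.h above: mutable_data dispatches the runtime Place (a boost::variant) to a concrete place type via place.which() and boost::get, and PlaceholderImpl then binds that concrete place into the deleter of a std::unique_ptr, so memory::Free is later called with exactly the place that memory::Alloc received. The sketch below is a minimal, self-contained illustration of that ownership pattern, not Paddle's actual API: CpuPlace, GpuPlace, Alloc, Free, MakeHolder and the use of C++17 std::variant/std::visit are stand-ins invented for the example (plain malloc/free replaces the buddy allocator, and std::visit replaces the which() switch).

// Illustrative sketch only -- CpuPlace/GpuPlace, Alloc(), Free() and
// MakeHolder() are hypothetical stand-ins for paddle::platform places and
// paddle::memory::Alloc/Free; std::variant/std::visit (C++17) replace the
// boost::variant switch on place.which() used in the patch.
#include <cstddef>
#include <cstdio>
#include <cstdlib>
#include <memory>
#include <variant>

struct CpuPlace {};
struct GpuPlace { int device = 0; };
using Place = std::variant<GpuPlace, CpuPlace>;  // stand-in for boost::variant

// Stand-in allocators: both simply use the heap in this sketch.
template <typename PlaceT>
void* Alloc(PlaceT, std::size_t size) { return std::malloc(size); }
template <typename PlaceT>
void Free(PlaceT, void* ptr) { std::free(ptr); }

// The deleter remembers the concrete place it was constructed with, so the
// buffer is always freed through the allocator that produced it -- the same
// idea as Deleter<PlaceType> inside PlaceholderImpl above.
template <typename PlaceT>
struct PlaceDeleter {
  explicit PlaceDeleter(PlaceT place) : place_(place) {}
  void operator()(unsigned char* ptr) const { Free(place_, ptr); }
  PlaceT place_;
};

// Type-erased holder, analogous to Placeholder/PlaceholderImpl in tensor.h.
struct Holder {
  virtual ~Holder() = default;
  virtual void* ptr() const = 0;
  virtual std::size_t size() const = 0;
};

template <typename PlaceT>
struct HolderImpl : Holder {
  HolderImpl(PlaceT place, std::size_t size)
      : ptr_(static_cast<unsigned char*>(Alloc(place, size)),
             PlaceDeleter<PlaceT>(place)),
        size_(size) {}
  void* ptr() const override { return ptr_.get(); }
  std::size_t size() const override { return size_; }
  std::unique_ptr<unsigned char, PlaceDeleter<PlaceT>> ptr_;
  std::size_t size_;
};

// Dispatch the runtime Place to a concrete holder type; this plays the role
// of the switch (place.which()) block in mutable_data.
std::unique_ptr<Holder> MakeHolder(const Place& place, std::size_t size) {
  return std::visit(
      [size](auto concrete_place) -> std::unique_ptr<Holder> {
        using P = decltype(concrete_place);
        return std::make_unique<HolderImpl<P>>(concrete_place, size);
      },
      place);
}

int main() {
  auto cpu_holder = MakeHolder(CpuPlace{}, 6 * 9 * sizeof(float));
  auto gpu_holder = MakeHolder(GpuPlace{1}, 2 * 3 * 4 * sizeof(int));
  std::printf("cpu buffer: %zu bytes at %p\n", cpu_holder->size(),
              cpu_holder->ptr());
  std::printf("gpu buffer: %zu bytes at %p\n", gpu_holder->size(),
              gpu_holder->ptr());
  // Both buffers are released through PlaceDeleter when the holders go out
  // of scope, each with the place it was allocated with.
  return 0;
}

Keeping the concrete place inside the deleter, rather than a generic Place member, is what lets the holder stay type-erased behind the Placeholder interface while still releasing memory through the matching allocator; that is the design choice the patch makes by templating PlaceholderImpl and Deleter on the place type.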